slab.c revision a06d72c1dcbff015250df6ad9f0b1d18c02113bf
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com> 81 * Alok N Kataria <alokk@calsoftinc.com> 82 * Christoph Lameter <christoph@lameter.com> 83 * 84 * Modified the slab allocator to be node aware on NUMA systems. 85 * Each node has its own list of partial, free and full slabs. 86 * All object allocations for a node occur from node specific slab lists. 87 */ 88 89#include <linux/slab.h> 90#include <linux/mm.h> 91#include <linux/poison.h> 92#include <linux/swap.h> 93#include <linux/cache.h> 94#include <linux/interrupt.h> 95#include <linux/init.h> 96#include <linux/compiler.h> 97#include <linux/cpuset.h> 98#include <linux/seq_file.h> 99#include <linux/notifier.h> 100#include <linux/kallsyms.h> 101#include <linux/cpu.h> 102#include <linux/sysctl.h> 103#include <linux/module.h> 104#include <linux/rcupdate.h> 105#include <linux/string.h> 106#include <linux/nodemask.h> 107#include <linux/mempolicy.h> 108#include <linux/mutex.h> 109#include <linux/rtmutex.h> 110 111#include <asm/uaccess.h> 112#include <asm/cacheflush.h> 113#include <asm/tlbflush.h> 114#include <asm/page.h> 115 116/* 117 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL, 118 * SLAB_RED_ZONE & SLAB_POISON. 119 * 0 for faster, smaller code (especially in the critical paths). 120 * 121 * STATS - 1 to collect stats for /proc/slabinfo. 122 * 0 for faster, smaller code (especially in the critical paths). 123 * 124 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) 125 */ 126 127#ifdef CONFIG_DEBUG_SLAB 128#define DEBUG 1 129#define STATS 1 130#define FORCED_DEBUG 1 131#else 132#define DEBUG 0 133#define STATS 0 134#define FORCED_DEBUG 0 135#endif 136 137/* Shouldn't this be in a header file somewhere? */ 138#define BYTES_PER_WORD sizeof(void *) 139 140#ifndef cache_line_size 141#define cache_line_size() L1_CACHE_BYTES 142#endif 143 144#ifndef ARCH_KMALLOC_MINALIGN 145/* 146 * Enforce a minimum alignment for the kmalloc caches. 147 * Usually, the kmalloc caches are cache_line_size() aligned, except when 148 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. 149 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 150 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that. 151 * Note that this flag disables some debug features. 152 */ 153#define ARCH_KMALLOC_MINALIGN 0 154#endif 155 156#ifndef ARCH_SLAB_MINALIGN 157/* 158 * Enforce a minimum alignment for all caches. 159 * Intended for archs that get misalignment faults even for BYTES_PER_WORD 160 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN. 161 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables 162 * some debug features. 163 */ 164#define ARCH_SLAB_MINALIGN 0 165#endif 166 167#ifndef ARCH_KMALLOC_FLAGS 168#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN 169#endif 170 171/* Legal flag mask for kmem_cache_create(). */ 172#if DEBUG 173# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \ 174 SLAB_POISON | SLAB_HWCACHE_ALIGN | \ 175 SLAB_CACHE_DMA | \ 176 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \ 177 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ 178 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) 179#else 180# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ 181 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \ 182 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ 183 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) 184#endif 185 186/* 187 * kmem_bufctl_t: 188 * 189 * Bufctl's are used for linking objs within a slab 190 * linked offsets. 
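 *
 * An illustrative sketch of that linking (object count assumed; the
 * helpers that maintain it live further down in this file): a freshly
 * grown slab with four objects starts out as
 *
 *	bufctl[] = { 1, 2, 3, BUFCTL_END };	slabp->free = 0;
 *
 * Allocating pops the head object (index 0) and advances the free-list
 * head to bufctl[0] == 1; freeing object 0 pushes it back with
 *
 *	bufctl[0] = slabp->free; slabp->free = 0;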
191 * 192 * This implementation relies on "struct page" for locating the cache & 193 * slab an object belongs to. 194 * This allows the bufctl structure to be small (one int), but limits 195 * the number of objects a slab (not a cache) can contain when off-slab 196 * bufctls are used. The limit is the size of the largest general cache 197 * that does not use off-slab slabs. 198 * For 32bit archs with 4 kB pages, is this 56. 199 * This is not serious, as it is only for large objects, when it is unwise 200 * to have too many per slab. 201 * Note: This limit can be raised by introducing a general cache whose size 202 * is less than 512 (PAGE_SIZE<<3), but greater than 256. 203 */ 204 205typedef unsigned int kmem_bufctl_t; 206#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) 207#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) 208#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) 209#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) 210 211/* 212 * struct slab 213 * 214 * Manages the objs in a slab. Placed either at the beginning of mem allocated 215 * for a slab, or allocated from an general cache. 216 * Slabs are chained into three list: fully used, partial, fully free slabs. 217 */ 218struct slab { 219 struct list_head list; 220 unsigned long colouroff; 221 void *s_mem; /* including colour offset */ 222 unsigned int inuse; /* num of objs active in slab */ 223 kmem_bufctl_t free; 224 unsigned short nodeid; 225}; 226 227/* 228 * struct slab_rcu 229 * 230 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to 231 * arrange for kmem_freepages to be called via RCU. This is useful if 232 * we need to approach a kernel structure obliquely, from its address 233 * obtained without the usual locking. We can lock the structure to 234 * stabilize it and check it's still at the given address, only if we 235 * can be sure that the memory has not been meanwhile reused for some 236 * other kind of object (which our subsystem's lock might corrupt). 237 * 238 * rcu_read_lock before reading the address, then rcu_read_unlock after 239 * taking the spinlock within the structure expected at that address. 240 * 241 * We assume struct slab_rcu can overlay struct slab when destroying. 242 */ 243struct slab_rcu { 244 struct rcu_head head; 245 struct kmem_cache *cachep; 246 void *addr; 247}; 248 249/* 250 * struct array_cache 251 * 252 * Purpose: 253 * - LIFO ordering, to hand out cache-warm objects from _alloc 254 * - reduce the number of linked list operations 255 * - reduce spinlock operations 256 * 257 * The limit is stored in the per-cpu structure to reduce the data cache 258 * footprint. 259 * 260 */ 261struct array_cache { 262 unsigned int avail; 263 unsigned int limit; 264 unsigned int batchcount; 265 unsigned int touched; 266 spinlock_t lock; 267 void *entry[0]; /* 268 * Must have this definition in here for the proper 269 * alignment of array_cache. Also simplifies accessing 270 * the entries. 271 * [0] is for gcc 2.95. It should really be []. 272 */ 273}; 274 275/* 276 * bootstrap: The caches do not work without cpuarrays anymore, but the 277 * cpuarrays are allocated from the generic caches... 278 */ 279#define BOOT_CPUCACHE_ENTRIES 1 280struct arraycache_init { 281 struct array_cache cache; 282 void *entries[BOOT_CPUCACHE_ENTRIES]; 283}; 284 285/* 286 * The slab lists for all objects. 
287 */ 288struct kmem_list3 { 289 struct list_head slabs_partial; /* partial list first, better asm code */ 290 struct list_head slabs_full; 291 struct list_head slabs_free; 292 unsigned long free_objects; 293 unsigned int free_limit; 294 unsigned int colour_next; /* Per-node cache coloring */ 295 spinlock_t list_lock; 296 struct array_cache *shared; /* shared per node */ 297 struct array_cache **alien; /* on other nodes */ 298 unsigned long next_reap; /* updated without locking */ 299 int free_touched; /* updated without locking */ 300}; 301 302/* 303 * Need this for bootstrapping a per node allocator. 304 */ 305#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1) 306struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; 307#define CACHE_CACHE 0 308#define SIZE_AC 1 309#define SIZE_L3 (1 + MAX_NUMNODES) 310 311static int drain_freelist(struct kmem_cache *cache, 312 struct kmem_list3 *l3, int tofree); 313static void free_block(struct kmem_cache *cachep, void **objpp, int len, 314 int node); 315static int enable_cpucache(struct kmem_cache *cachep); 316static void cache_reap(struct work_struct *unused); 317 318/* 319 * This function must be completely optimized away if a constant is passed to 320 * it. Mostly the same as what is in linux/slab.h except it returns an index. 321 */ 322static __always_inline int index_of(const size_t size) 323{ 324 extern void __bad_size(void); 325 326 if (__builtin_constant_p(size)) { 327 int i = 0; 328 329#define CACHE(x) \ 330 if (size <=x) \ 331 return i; \ 332 else \ 333 i++; 334#include "linux/kmalloc_sizes.h" 335#undef CACHE 336 __bad_size(); 337 } else 338 __bad_size(); 339 return 0; 340} 341 342static int slab_early_init = 1; 343 344#define INDEX_AC index_of(sizeof(struct arraycache_init)) 345#define INDEX_L3 index_of(sizeof(struct kmem_list3)) 346 347static void kmem_list3_init(struct kmem_list3 *parent) 348{ 349 INIT_LIST_HEAD(&parent->slabs_full); 350 INIT_LIST_HEAD(&parent->slabs_partial); 351 INIT_LIST_HEAD(&parent->slabs_free); 352 parent->shared = NULL; 353 parent->alien = NULL; 354 parent->colour_next = 0; 355 spin_lock_init(&parent->list_lock); 356 parent->free_objects = 0; 357 parent->free_touched = 0; 358} 359 360#define MAKE_LIST(cachep, listp, slab, nodeid) \ 361 do { \ 362 INIT_LIST_HEAD(listp); \ 363 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ 364 } while (0) 365 366#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 367 do { \ 368 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 369 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 370 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 371 } while (0) 372 373/* 374 * struct kmem_cache 375 * 376 * manages a cache. 377 */ 378 379struct kmem_cache { 380/* 1) per-cpu data, touched during every alloc/free */ 381 struct array_cache *array[NR_CPUS]; 382/* 2) Cache tunables. Protected by cache_chain_mutex */ 383 unsigned int batchcount; 384 unsigned int limit; 385 unsigned int shared; 386 387 unsigned int buffer_size; 388/* 3) touched by every alloc & free from the backend */ 389 struct kmem_list3 *nodelists[MAX_NUMNODES]; 390 391 unsigned int flags; /* constant flags */ 392 unsigned int num; /* # of objs per slab */ 393 394/* 4) cache_grow/shrink */ 395 /* order of pgs per slab (2^n) */ 396 unsigned int gfporder; 397 398 /* force GFP flags, e.g. 
GFP_DMA */ 399 gfp_t gfpflags; 400 401 size_t colour; /* cache colouring range */ 402 unsigned int colour_off; /* colour offset */ 403 struct kmem_cache *slabp_cache; 404 unsigned int slab_size; 405 unsigned int dflags; /* dynamic flags */ 406 407 /* constructor func */ 408 void (*ctor) (void *, struct kmem_cache *, unsigned long); 409 410 /* de-constructor func */ 411 void (*dtor) (void *, struct kmem_cache *, unsigned long); 412 413/* 5) cache creation/removal */ 414 const char *name; 415 struct list_head next; 416 417/* 6) statistics */ 418#if STATS 419 unsigned long num_active; 420 unsigned long num_allocations; 421 unsigned long high_mark; 422 unsigned long grown; 423 unsigned long reaped; 424 unsigned long errors; 425 unsigned long max_freeable; 426 unsigned long node_allocs; 427 unsigned long node_frees; 428 unsigned long node_overflow; 429 atomic_t allochit; 430 atomic_t allocmiss; 431 atomic_t freehit; 432 atomic_t freemiss; 433#endif 434#if DEBUG 435 /* 436 * If debugging is enabled, then the allocator can add additional 437 * fields and/or padding to every object. buffer_size contains the total 438 * object size including these internal fields, the following two 439 * variables contain the offset to the user object and its size. 440 */ 441 int obj_offset; 442 int obj_size; 443#endif 444}; 445 446#define CFLGS_OFF_SLAB (0x80000000UL) 447#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 448 449#define BATCHREFILL_LIMIT 16 450/* 451 * Optimization question: fewer reaps means less probability for unnessary 452 * cpucache drain/refill cycles. 453 * 454 * OTOH the cpuarrays can contain lots of objects, 455 * which could lock up otherwise freeable slabs. 456 */ 457#define REAPTIMEOUT_CPUC (2*HZ) 458#define REAPTIMEOUT_LIST3 (4*HZ) 459 460#if STATS 461#define STATS_INC_ACTIVE(x) ((x)->num_active++) 462#define STATS_DEC_ACTIVE(x) ((x)->num_active--) 463#define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 464#define STATS_INC_GROWN(x) ((x)->grown++) 465#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) 466#define STATS_SET_HIGH(x) \ 467 do { \ 468 if ((x)->num_active > (x)->high_mark) \ 469 (x)->high_mark = (x)->num_active; \ 470 } while (0) 471#define STATS_INC_ERR(x) ((x)->errors++) 472#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 473#define STATS_INC_NODEFREES(x) ((x)->node_frees++) 474#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) 475#define STATS_SET_FREEABLE(x, i) \ 476 do { \ 477 if ((x)->max_freeable < i) \ 478 (x)->max_freeable = i; \ 479 } while (0) 480#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 481#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 482#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 483#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 484#else 485#define STATS_INC_ACTIVE(x) do { } while (0) 486#define STATS_DEC_ACTIVE(x) do { } while (0) 487#define STATS_INC_ALLOCED(x) do { } while (0) 488#define STATS_INC_GROWN(x) do { } while (0) 489#define STATS_ADD_REAPED(x,y) do { } while (0) 490#define STATS_SET_HIGH(x) do { } while (0) 491#define STATS_INC_ERR(x) do { } while (0) 492#define STATS_INC_NODEALLOCS(x) do { } while (0) 493#define STATS_INC_NODEFREES(x) do { } while (0) 494#define STATS_INC_ACOVERFLOW(x) do { } while (0) 495#define STATS_SET_FREEABLE(x, i) do { } while (0) 496#define STATS_INC_ALLOCHIT(x) do { } while (0) 497#define STATS_INC_ALLOCMISS(x) do { } while (0) 498#define STATS_INC_FREEHIT(x) do { } while (0) 499#define STATS_INC_FREEMISS(x) do { } while (0) 500#endif 501 502#if DEBUG 503 504/* 505 * 
memory layout of objects: 506 * 0 : objp 507 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 508 * the end of an object is aligned with the end of the real 509 * allocation. Catches writes behind the end of the allocation. 510 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 511 * redzone word. 512 * cachep->obj_offset: The real object. 513 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 514 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address 515 * [BYTES_PER_WORD long] 516 */ 517static int obj_offset(struct kmem_cache *cachep) 518{ 519 return cachep->obj_offset; 520} 521 522static int obj_size(struct kmem_cache *cachep) 523{ 524 return cachep->obj_size; 525} 526 527static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 528{ 529 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 530 return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD); 531} 532 533static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 534{ 535 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 536 if (cachep->flags & SLAB_STORE_USER) 537 return (unsigned long *)(objp + cachep->buffer_size - 538 2 * BYTES_PER_WORD); 539 return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD); 540} 541 542static void **dbg_userword(struct kmem_cache *cachep, void *objp) 543{ 544 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 545 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); 546} 547 548#else 549 550#define obj_offset(x) 0 551#define obj_size(cachep) (cachep->buffer_size) 552#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 553#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;}) 554#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 555 556#endif 557 558/* 559 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp 560 * order. 561 */ 562#if defined(CONFIG_LARGE_ALLOCS) 563#define MAX_OBJ_ORDER 13 /* up to 32Mb */ 564#define MAX_GFP_ORDER 13 /* up to 32Mb */ 565#elif defined(CONFIG_MMU) 566#define MAX_OBJ_ORDER 5 /* 32 pages */ 567#define MAX_GFP_ORDER 5 /* 32 pages */ 568#else 569#define MAX_OBJ_ORDER 8 /* up to 1Mb */ 570#define MAX_GFP_ORDER 8 /* up to 1Mb */ 571#endif 572 573/* 574 * Do not go above this order unless 0 objects fit into the slab. 575 */ 576#define BREAK_GFP_ORDER_HI 1 577#define BREAK_GFP_ORDER_LO 0 578static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; 579 580/* 581 * Functions for storing/retrieving the cachep and or slab from the page 582 * allocator. These are used to find the slab an obj belongs to. With kfree(), 583 * these are used to find the cache which an obj belongs to. 
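 *
 * A minimal sketch of the round trip, assuming objp was returned by
 * kmem_cache_alloc() on cachep: when a slab is grown the allocator
 * records, for every page backing it,
 *
 *	page_set_cache(virt_to_page(objp), cachep);
 *	page_set_slab(virt_to_page(objp), slabp);
 *
 * and the free paths later recover both mappings with
 *
 *	cachep = virt_to_cache(objp);
 *	slabp = virt_to_slab(objp);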
584 */ 585static inline void page_set_cache(struct page *page, struct kmem_cache *cache) 586{ 587 page->lru.next = (struct list_head *)cache; 588} 589 590static inline struct kmem_cache *page_get_cache(struct page *page) 591{ 592 if (unlikely(PageCompound(page))) 593 page = (struct page *)page_private(page); 594 BUG_ON(!PageSlab(page)); 595 return (struct kmem_cache *)page->lru.next; 596} 597 598static inline void page_set_slab(struct page *page, struct slab *slab) 599{ 600 page->lru.prev = (struct list_head *)slab; 601} 602 603static inline struct slab *page_get_slab(struct page *page) 604{ 605 if (unlikely(PageCompound(page))) 606 page = (struct page *)page_private(page); 607 BUG_ON(!PageSlab(page)); 608 return (struct slab *)page->lru.prev; 609} 610 611static inline struct kmem_cache *virt_to_cache(const void *obj) 612{ 613 struct page *page = virt_to_page(obj); 614 return page_get_cache(page); 615} 616 617static inline struct slab *virt_to_slab(const void *obj) 618{ 619 struct page *page = virt_to_page(obj); 620 return page_get_slab(page); 621} 622 623static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, 624 unsigned int idx) 625{ 626 return slab->s_mem + cache->buffer_size * idx; 627} 628 629static inline unsigned int obj_to_index(struct kmem_cache *cache, 630 struct slab *slab, void *obj) 631{ 632 return (unsigned)(obj - slab->s_mem) / cache->buffer_size; 633} 634 635/* 636 * These are the default caches for kmalloc. Custom caches can have other sizes. 637 */ 638struct cache_sizes malloc_sizes[] = { 639#define CACHE(x) { .cs_size = (x) }, 640#include <linux/kmalloc_sizes.h> 641 CACHE(ULONG_MAX) 642#undef CACHE 643}; 644EXPORT_SYMBOL(malloc_sizes); 645 646/* Must match cache_sizes above. Out of line to keep cache footprint low. */ 647struct cache_names { 648 char *name; 649 char *name_dma; 650}; 651 652static struct cache_names __initdata cache_names[] = { 653#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, 654#include <linux/kmalloc_sizes.h> 655 {NULL,} 656#undef CACHE 657}; 658 659static struct arraycache_init initarray_cache __initdata = 660 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 661static struct arraycache_init initarray_generic = 662 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 663 664/* internal cache of cache description objs */ 665static struct kmem_cache cache_cache = { 666 .batchcount = 1, 667 .limit = BOOT_CPUCACHE_ENTRIES, 668 .shared = 1, 669 .buffer_size = sizeof(struct kmem_cache), 670 .name = "kmem_cache", 671#if DEBUG 672 .obj_size = sizeof(struct kmem_cache), 673#endif 674}; 675 676#define BAD_ALIEN_MAGIC 0x01020304ul 677 678#ifdef CONFIG_LOCKDEP 679 680/* 681 * Slab sometimes uses the kmalloc slabs to store the slab headers 682 * for other slabs "off slab". 683 * The locking for this is tricky in that it nests within the locks 684 * of all other slabs in a few places; to deal with this special 685 * locking we put on-slab caches into a separate lock-class. 686 * 687 * We set lock class for alien array caches which are up during init. 
688 * The lock annotation will be lost if all cpus of a node goes down and 689 * then comes back up during hotplug 690 */ 691static struct lock_class_key on_slab_l3_key; 692static struct lock_class_key on_slab_alc_key; 693 694static inline void init_lock_keys(void) 695 696{ 697 int q; 698 struct cache_sizes *s = malloc_sizes; 699 700 while (s->cs_size != ULONG_MAX) { 701 for_each_node(q) { 702 struct array_cache **alc; 703 int r; 704 struct kmem_list3 *l3 = s->cs_cachep->nodelists[q]; 705 if (!l3 || OFF_SLAB(s->cs_cachep)) 706 continue; 707 lockdep_set_class(&l3->list_lock, &on_slab_l3_key); 708 alc = l3->alien; 709 /* 710 * FIXME: This check for BAD_ALIEN_MAGIC 711 * should go away when common slab code is taught to 712 * work even without alien caches. 713 * Currently, non NUMA code returns BAD_ALIEN_MAGIC 714 * for alloc_alien_cache, 715 */ 716 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) 717 continue; 718 for_each_node(r) { 719 if (alc[r]) 720 lockdep_set_class(&alc[r]->lock, 721 &on_slab_alc_key); 722 } 723 } 724 s++; 725 } 726} 727#else 728static inline void init_lock_keys(void) 729{ 730} 731#endif 732 733/* 734 * 1. Guard access to the cache-chain. 735 * 2. Protect sanity of cpu_online_map against cpu hotplug events 736 */ 737static DEFINE_MUTEX(cache_chain_mutex); 738static struct list_head cache_chain; 739 740/* 741 * chicken and egg problem: delay the per-cpu array allocation 742 * until the general caches are up. 743 */ 744static enum { 745 NONE, 746 PARTIAL_AC, 747 PARTIAL_L3, 748 FULL 749} g_cpucache_up; 750 751/* 752 * used by boot code to determine if it can use slab based allocator 753 */ 754int slab_is_available(void) 755{ 756 return g_cpucache_up == FULL; 757} 758 759static DEFINE_PER_CPU(struct delayed_work, reap_work); 760 761static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 762{ 763 return cachep->array[smp_processor_id()]; 764} 765 766static inline struct kmem_cache *__find_general_cachep(size_t size, 767 gfp_t gfpflags) 768{ 769 struct cache_sizes *csizep = malloc_sizes; 770 771#if DEBUG 772 /* This happens if someone tries to call 773 * kmem_cache_create(), or __kmalloc(), before 774 * the generic caches are initialized. 775 */ 776 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 777#endif 778 while (size > csizep->cs_size) 779 csizep++; 780 781 /* 782 * Really subtle: The last entry with cs->cs_size==ULONG_MAX 783 * has cs_{dma,}cachep==NULL. Thus no special case 784 * for large kmalloc calls required. 785 */ 786 if (unlikely(gfpflags & GFP_DMA)) 787 return csizep->cs_dmacachep; 788 return csizep->cs_cachep; 789} 790 791static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 792{ 793 return __find_general_cachep(size, gfpflags); 794} 795 796static size_t slab_mgmt_size(size_t nr_objs, size_t align) 797{ 798 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); 799} 800 801/* 802 * Calculate the number of objects and left-over bytes for a given buffer size. 803 */ 804static void cache_estimate(unsigned long gfporder, size_t buffer_size, 805 size_t align, int flags, size_t *left_over, 806 unsigned int *num) 807{ 808 int nr_objs; 809 size_t mgmt_size; 810 size_t slab_size = PAGE_SIZE << gfporder; 811 812 /* 813 * The slab management structure can be either off the slab or 814 * on it. 
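 *
 * As a worked example of the off-slab branch below (sizes assumed):
 * with a 4096-byte slab and 2048-byte objects the management structure
 * lives in a separate general cache, so mgmt_size is 0 and simply
 * nr_objs = 4096 / 2048 = 2.
 *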
For the latter case, the memory allocated for a 815 * slab is used for: 816 * 817 * - The struct slab 818 * - One kmem_bufctl_t for each object 819 * - Padding to respect alignment of @align 820 * - @buffer_size bytes for each object 821 * 822 * If the slab management structure is off the slab, then the 823 * alignment will already be calculated into the size. Because 824 * the slabs are all pages aligned, the objects will be at the 825 * correct alignment when allocated. 826 */ 827 if (flags & CFLGS_OFF_SLAB) { 828 mgmt_size = 0; 829 nr_objs = slab_size / buffer_size; 830 831 if (nr_objs > SLAB_LIMIT) 832 nr_objs = SLAB_LIMIT; 833 } else { 834 /* 835 * Ignore padding for the initial guess. The padding 836 * is at most @align-1 bytes, and @buffer_size is at 837 * least @align. In the worst case, this result will 838 * be one greater than the number of objects that fit 839 * into the memory allocation when taking the padding 840 * into account. 841 */ 842 nr_objs = (slab_size - sizeof(struct slab)) / 843 (buffer_size + sizeof(kmem_bufctl_t)); 844 845 /* 846 * This calculated number will be either the right 847 * amount, or one greater than what we want. 848 */ 849 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size 850 > slab_size) 851 nr_objs--; 852 853 if (nr_objs > SLAB_LIMIT) 854 nr_objs = SLAB_LIMIT; 855 856 mgmt_size = slab_mgmt_size(nr_objs, align); 857 } 858 *num = nr_objs; 859 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 860} 861 862#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) 863 864static void __slab_error(const char *function, struct kmem_cache *cachep, 865 char *msg) 866{ 867 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 868 function, cachep->name, msg); 869 dump_stack(); 870} 871 872/* 873 * By default on NUMA we use alien caches to stage the freeing of 874 * objects allocated from other nodes. This causes massive memory 875 * inefficiencies when using fake NUMA setup to split memory into a 876 * large number of small nodes, so it can be disabled on the command 877 * line 878 */ 879 880static int use_alien_caches __read_mostly = 1; 881static int __init noaliencache_setup(char *s) 882{ 883 use_alien_caches = 0; 884 return 1; 885} 886__setup("noaliencache", noaliencache_setup); 887 888#ifdef CONFIG_NUMA 889/* 890 * Special reaping functions for NUMA systems called from cache_reap(). 891 * These take care of doing round robin flushing of alien caches (containing 892 * objects freed on different nodes from which they were allocated) and the 893 * flushing of remote pcps by calling drain_node_pages. 894 */ 895static DEFINE_PER_CPU(unsigned long, reap_node); 896 897static void init_reap_node(int cpu) 898{ 899 int node; 900 901 node = next_node(cpu_to_node(cpu), node_online_map); 902 if (node == MAX_NUMNODES) 903 node = first_node(node_online_map); 904 905 per_cpu(reap_node, cpu) = node; 906} 907 908static void next_reap_node(void) 909{ 910 int node = __get_cpu_var(reap_node); 911 912 /* 913 * Also drain per cpu pages on remote zones 914 */ 915 if (node != numa_node_id()) 916 drain_node_pages(node); 917 918 node = next_node(node, node_online_map); 919 if (unlikely(node >= MAX_NUMNODES)) 920 node = first_node(node_online_map); 921 __get_cpu_var(reap_node) = node; 922} 923 924#else 925#define init_reap_node(cpu) do { } while (0) 926#define next_reap_node(void) do { } while (0) 927#endif 928 929/* 930 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 931 * via the workqueue/eventd. 
932 * Add the CPU number into the expiration time to minimize the possibility of 933 * the CPUs getting into lockstep and contending for the global cache chain 934 * lock. 935 */ 936static void __devinit start_cpu_timer(int cpu) 937{ 938 struct delayed_work *reap_work = &per_cpu(reap_work, cpu); 939 940 /* 941 * When this gets called from do_initcalls via cpucache_init(), 942 * init_workqueues() has already run, so keventd will be setup 943 * at that time. 944 */ 945 if (keventd_up() && reap_work->work.func == NULL) { 946 init_reap_node(cpu); 947 INIT_DELAYED_WORK(reap_work, cache_reap); 948 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); 949 } 950} 951 952static struct array_cache *alloc_arraycache(int node, int entries, 953 int batchcount) 954{ 955 int memsize = sizeof(void *) * entries + sizeof(struct array_cache); 956 struct array_cache *nc = NULL; 957 958 nc = kmalloc_node(memsize, GFP_KERNEL, node); 959 if (nc) { 960 nc->avail = 0; 961 nc->limit = entries; 962 nc->batchcount = batchcount; 963 nc->touched = 0; 964 spin_lock_init(&nc->lock); 965 } 966 return nc; 967} 968 969/* 970 * Transfer objects in one arraycache to another. 971 * Locking must be handled by the caller. 972 * 973 * Return the number of entries transferred. 974 */ 975static int transfer_objects(struct array_cache *to, 976 struct array_cache *from, unsigned int max) 977{ 978 /* Figure out how many entries to transfer */ 979 int nr = min(min(from->avail, max), to->limit - to->avail); 980 981 if (!nr) 982 return 0; 983 984 memcpy(to->entry + to->avail, from->entry + from->avail -nr, 985 sizeof(void *) *nr); 986 987 from->avail -= nr; 988 to->avail += nr; 989 to->touched = 1; 990 return nr; 991} 992 993#ifndef CONFIG_NUMA 994 995#define drain_alien_cache(cachep, alien) do { } while (0) 996#define reap_alien(cachep, l3) do { } while (0) 997 998static inline struct array_cache **alloc_alien_cache(int node, int limit) 999{ 1000 return (struct array_cache **)BAD_ALIEN_MAGIC; 1001} 1002 1003static inline void free_alien_cache(struct array_cache **ac_ptr) 1004{ 1005} 1006 1007static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1008{ 1009 return 0; 1010} 1011 1012static inline void *alternate_node_alloc(struct kmem_cache *cachep, 1013 gfp_t flags) 1014{ 1015 return NULL; 1016} 1017 1018static inline void *____cache_alloc_node(struct kmem_cache *cachep, 1019 gfp_t flags, int nodeid) 1020{ 1021 return NULL; 1022} 1023 1024#else /* CONFIG_NUMA */ 1025 1026static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); 1027static void *alternate_node_alloc(struct kmem_cache *, gfp_t); 1028 1029static struct array_cache **alloc_alien_cache(int node, int limit) 1030{ 1031 struct array_cache **ac_ptr; 1032 int memsize = sizeof(void *) * MAX_NUMNODES; 1033 int i; 1034 1035 if (limit > 1) 1036 limit = 12; 1037 ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node); 1038 if (ac_ptr) { 1039 for_each_node(i) { 1040 if (i == node || !node_online(i)) { 1041 ac_ptr[i] = NULL; 1042 continue; 1043 } 1044 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); 1045 if (!ac_ptr[i]) { 1046 for (i--; i <= 0; i--) 1047 kfree(ac_ptr[i]); 1048 kfree(ac_ptr); 1049 return NULL; 1050 } 1051 } 1052 } 1053 return ac_ptr; 1054} 1055 1056static void free_alien_cache(struct array_cache **ac_ptr) 1057{ 1058 int i; 1059 1060 if (!ac_ptr) 1061 return; 1062 for_each_node(i) 1063 kfree(ac_ptr[i]); 1064 kfree(ac_ptr); 1065} 1066 1067static void __drain_alien_cache(struct kmem_cache *cachep, 1068 struct array_cache *ac, int node) 1069{ 
1070 struct kmem_list3 *rl3 = cachep->nodelists[node]; 1071 1072 if (ac->avail) { 1073 spin_lock(&rl3->list_lock); 1074 /* 1075 * Stuff objects into the remote nodes shared array first. 1076 * That way we could avoid the overhead of putting the objects 1077 * into the free lists and getting them back later. 1078 */ 1079 if (rl3->shared) 1080 transfer_objects(rl3->shared, ac, ac->limit); 1081 1082 free_block(cachep, ac->entry, ac->avail, node); 1083 ac->avail = 0; 1084 spin_unlock(&rl3->list_lock); 1085 } 1086} 1087 1088/* 1089 * Called from cache_reap() to regularly drain alien caches round robin. 1090 */ 1091static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) 1092{ 1093 int node = __get_cpu_var(reap_node); 1094 1095 if (l3->alien) { 1096 struct array_cache *ac = l3->alien[node]; 1097 1098 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { 1099 __drain_alien_cache(cachep, ac, node); 1100 spin_unlock_irq(&ac->lock); 1101 } 1102 } 1103} 1104 1105static void drain_alien_cache(struct kmem_cache *cachep, 1106 struct array_cache **alien) 1107{ 1108 int i = 0; 1109 struct array_cache *ac; 1110 unsigned long flags; 1111 1112 for_each_online_node(i) { 1113 ac = alien[i]; 1114 if (ac) { 1115 spin_lock_irqsave(&ac->lock, flags); 1116 __drain_alien_cache(cachep, ac, i); 1117 spin_unlock_irqrestore(&ac->lock, flags); 1118 } 1119 } 1120} 1121 1122static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1123{ 1124 struct slab *slabp = virt_to_slab(objp); 1125 int nodeid = slabp->nodeid; 1126 struct kmem_list3 *l3; 1127 struct array_cache *alien = NULL; 1128 int node; 1129 1130 node = numa_node_id(); 1131 1132 /* 1133 * Make sure we are not freeing a object from another node to the array 1134 * cache on this cpu. 1135 */ 1136 if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches)) 1137 return 0; 1138 1139 l3 = cachep->nodelists[node]; 1140 STATS_INC_NODEFREES(cachep); 1141 if (l3->alien && l3->alien[nodeid]) { 1142 alien = l3->alien[nodeid]; 1143 spin_lock(&alien->lock); 1144 if (unlikely(alien->avail == alien->limit)) { 1145 STATS_INC_ACOVERFLOW(cachep); 1146 __drain_alien_cache(cachep, alien, nodeid); 1147 } 1148 alien->entry[alien->avail++] = objp; 1149 spin_unlock(&alien->lock); 1150 } else { 1151 spin_lock(&(cachep->nodelists[nodeid])->list_lock); 1152 free_block(cachep, &objp, 1, nodeid); 1153 spin_unlock(&(cachep->nodelists[nodeid])->list_lock); 1154 } 1155 return 1; 1156} 1157#endif 1158 1159static int __cpuinit cpuup_callback(struct notifier_block *nfb, 1160 unsigned long action, void *hcpu) 1161{ 1162 long cpu = (long)hcpu; 1163 struct kmem_cache *cachep; 1164 struct kmem_list3 *l3 = NULL; 1165 int node = cpu_to_node(cpu); 1166 int memsize = sizeof(struct kmem_list3); 1167 1168 switch (action) { 1169 case CPU_UP_PREPARE: 1170 mutex_lock(&cache_chain_mutex); 1171 /* 1172 * We need to do this right in the beginning since 1173 * alloc_arraycache's are going to use this list. 1174 * kmalloc_node allows us to add the slab to the right 1175 * kmem_list3 and not this cpu's kmem_list3 1176 */ 1177 1178 list_for_each_entry(cachep, &cache_chain, next) { 1179 /* 1180 * Set up the size64 kmemlist for cpu before we can 1181 * begin anything. 
Make sure some other cpu on this 1182 * node has not already allocated this 1183 */ 1184 if (!cachep->nodelists[node]) { 1185 l3 = kmalloc_node(memsize, GFP_KERNEL, node); 1186 if (!l3) 1187 goto bad; 1188 kmem_list3_init(l3); 1189 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 1190 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1191 1192 /* 1193 * The l3s don't come and go as CPUs come and 1194 * go. cache_chain_mutex is sufficient 1195 * protection here. 1196 */ 1197 cachep->nodelists[node] = l3; 1198 } 1199 1200 spin_lock_irq(&cachep->nodelists[node]->list_lock); 1201 cachep->nodelists[node]->free_limit = 1202 (1 + nr_cpus_node(node)) * 1203 cachep->batchcount + cachep->num; 1204 spin_unlock_irq(&cachep->nodelists[node]->list_lock); 1205 } 1206 1207 /* 1208 * Now we can go ahead with allocating the shared arrays and 1209 * array caches 1210 */ 1211 list_for_each_entry(cachep, &cache_chain, next) { 1212 struct array_cache *nc; 1213 struct array_cache *shared; 1214 struct array_cache **alien = NULL; 1215 1216 nc = alloc_arraycache(node, cachep->limit, 1217 cachep->batchcount); 1218 if (!nc) 1219 goto bad; 1220 shared = alloc_arraycache(node, 1221 cachep->shared * cachep->batchcount, 1222 0xbaadf00d); 1223 if (!shared) 1224 goto bad; 1225 1226 if (use_alien_caches) { 1227 alien = alloc_alien_cache(node, cachep->limit); 1228 if (!alien) 1229 goto bad; 1230 } 1231 cachep->array[cpu] = nc; 1232 l3 = cachep->nodelists[node]; 1233 BUG_ON(!l3); 1234 1235 spin_lock_irq(&l3->list_lock); 1236 if (!l3->shared) { 1237 /* 1238 * We are serialised from CPU_DEAD or 1239 * CPU_UP_CANCELLED by the cpucontrol lock 1240 */ 1241 l3->shared = shared; 1242 shared = NULL; 1243 } 1244#ifdef CONFIG_NUMA 1245 if (!l3->alien) { 1246 l3->alien = alien; 1247 alien = NULL; 1248 } 1249#endif 1250 spin_unlock_irq(&l3->list_lock); 1251 kfree(shared); 1252 free_alien_cache(alien); 1253 } 1254 break; 1255 case CPU_ONLINE: 1256 mutex_unlock(&cache_chain_mutex); 1257 start_cpu_timer(cpu); 1258 break; 1259#ifdef CONFIG_HOTPLUG_CPU 1260 case CPU_DOWN_PREPARE: 1261 mutex_lock(&cache_chain_mutex); 1262 break; 1263 case CPU_DOWN_FAILED: 1264 mutex_unlock(&cache_chain_mutex); 1265 break; 1266 case CPU_DEAD: 1267 /* 1268 * Even if all the cpus of a node are down, we don't free the 1269 * kmem_list3 of any cache. This to avoid a race between 1270 * cpu_down, and a kmalloc allocation from another cpu for 1271 * memory from the node of the cpu going down. The list3 1272 * structure is usually allocated from kmem_cache_create() and 1273 * gets destroyed at kmem_cache_destroy(). 1274 */ 1275 /* fall thru */ 1276#endif 1277 case CPU_UP_CANCELED: 1278 list_for_each_entry(cachep, &cache_chain, next) { 1279 struct array_cache *nc; 1280 struct array_cache *shared; 1281 struct array_cache **alien; 1282 cpumask_t mask; 1283 1284 mask = node_to_cpumask(node); 1285 /* cpu is dead; no one can alloc from it. 
*/ 1286 nc = cachep->array[cpu]; 1287 cachep->array[cpu] = NULL; 1288 l3 = cachep->nodelists[node]; 1289 1290 if (!l3) 1291 goto free_array_cache; 1292 1293 spin_lock_irq(&l3->list_lock); 1294 1295 /* Free limit for this kmem_list3 */ 1296 l3->free_limit -= cachep->batchcount; 1297 if (nc) 1298 free_block(cachep, nc->entry, nc->avail, node); 1299 1300 if (!cpus_empty(mask)) { 1301 spin_unlock_irq(&l3->list_lock); 1302 goto free_array_cache; 1303 } 1304 1305 shared = l3->shared; 1306 if (shared) { 1307 free_block(cachep, l3->shared->entry, 1308 l3->shared->avail, node); 1309 l3->shared = NULL; 1310 } 1311 1312 alien = l3->alien; 1313 l3->alien = NULL; 1314 1315 spin_unlock_irq(&l3->list_lock); 1316 1317 kfree(shared); 1318 if (alien) { 1319 drain_alien_cache(cachep, alien); 1320 free_alien_cache(alien); 1321 } 1322free_array_cache: 1323 kfree(nc); 1324 } 1325 /* 1326 * In the previous loop, all the objects were freed to 1327 * the respective cache's slabs, now we can go ahead and 1328 * shrink each nodelist to its limit. 1329 */ 1330 list_for_each_entry(cachep, &cache_chain, next) { 1331 l3 = cachep->nodelists[node]; 1332 if (!l3) 1333 continue; 1334 drain_freelist(cachep, l3, l3->free_objects); 1335 } 1336 mutex_unlock(&cache_chain_mutex); 1337 break; 1338 } 1339 return NOTIFY_OK; 1340bad: 1341 return NOTIFY_BAD; 1342} 1343 1344static struct notifier_block __cpuinitdata cpucache_notifier = { 1345 &cpuup_callback, NULL, 0 1346}; 1347 1348/* 1349 * swap the static kmem_list3 with kmalloced memory 1350 */ 1351static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, 1352 int nodeid) 1353{ 1354 struct kmem_list3 *ptr; 1355 1356 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); 1357 BUG_ON(!ptr); 1358 1359 local_irq_disable(); 1360 memcpy(ptr, list, sizeof(struct kmem_list3)); 1361 /* 1362 * Do not assume that spinlocks can be initialized via memcpy: 1363 */ 1364 spin_lock_init(&ptr->list_lock); 1365 1366 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1367 cachep->nodelists[nodeid] = ptr; 1368 local_irq_enable(); 1369} 1370 1371/* 1372 * Initialisation. Called after the page allocator have been initialised and 1373 * before smp_init(). 1374 */ 1375void __init kmem_cache_init(void) 1376{ 1377 size_t left_over; 1378 struct cache_sizes *sizes; 1379 struct cache_names *names; 1380 int i; 1381 int order; 1382 int node; 1383 1384 for (i = 0; i < NUM_INIT_LISTS; i++) { 1385 kmem_list3_init(&initkmem_list3[i]); 1386 if (i < MAX_NUMNODES) 1387 cache_cache.nodelists[i] = NULL; 1388 } 1389 1390 /* 1391 * Fragmentation resistance on low memory - only use bigger 1392 * page orders on machines with more than 32MB of memory. 1393 */ 1394 if (num_physpages > (32 << 20) >> PAGE_SHIFT) 1395 slab_break_gfp_order = BREAK_GFP_ORDER_HI; 1396 1397 /* Bootstrap is tricky, because several objects are allocated 1398 * from caches that do not exist yet: 1399 * 1) initialize the cache_cache cache: it contains the struct 1400 * kmem_cache structures of all caches, except cache_cache itself: 1401 * cache_cache is statically allocated. 1402 * Initially an __init data area is used for the head array and the 1403 * kmem_list3 structures, it's replaced with a kmalloc allocated 1404 * array at the end of the bootstrap. 1405 * 2) Create the first kmalloc cache. 1406 * The struct kmem_cache for the new cache is allocated normally. 1407 * An __init data area is used for the head array. 1408 * 3) Create the remaining kmalloc caches, with minimally sized 1409 * head arrays. 
1410 * 4) Replace the __init data head arrays for cache_cache and the first 1411 * kmalloc cache with kmalloc allocated arrays. 1412 * 5) Replace the __init data for kmem_list3 for cache_cache and 1413 * the other cache's with kmalloc allocated memory. 1414 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1415 */ 1416 1417 node = numa_node_id(); 1418 1419 /* 1) create the cache_cache */ 1420 INIT_LIST_HEAD(&cache_chain); 1421 list_add(&cache_cache.next, &cache_chain); 1422 cache_cache.colour_off = cache_line_size(); 1423 cache_cache.array[smp_processor_id()] = &initarray_cache.cache; 1424 cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE]; 1425 1426 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, 1427 cache_line_size()); 1428 1429 for (order = 0; order < MAX_ORDER; order++) { 1430 cache_estimate(order, cache_cache.buffer_size, 1431 cache_line_size(), 0, &left_over, &cache_cache.num); 1432 if (cache_cache.num) 1433 break; 1434 } 1435 BUG_ON(!cache_cache.num); 1436 cache_cache.gfporder = order; 1437 cache_cache.colour = left_over / cache_cache.colour_off; 1438 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1439 sizeof(struct slab), cache_line_size()); 1440 1441 /* 2+3) create the kmalloc caches */ 1442 sizes = malloc_sizes; 1443 names = cache_names; 1444 1445 /* 1446 * Initialize the caches that provide memory for the array cache and the 1447 * kmem_list3 structures first. Without this, further allocations will 1448 * bug. 1449 */ 1450 1451 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, 1452 sizes[INDEX_AC].cs_size, 1453 ARCH_KMALLOC_MINALIGN, 1454 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1455 NULL, NULL); 1456 1457 if (INDEX_AC != INDEX_L3) { 1458 sizes[INDEX_L3].cs_cachep = 1459 kmem_cache_create(names[INDEX_L3].name, 1460 sizes[INDEX_L3].cs_size, 1461 ARCH_KMALLOC_MINALIGN, 1462 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1463 NULL, NULL); 1464 } 1465 1466 slab_early_init = 0; 1467 1468 while (sizes->cs_size != ULONG_MAX) { 1469 /* 1470 * For performance, all the general caches are L1 aligned. 1471 * This should be particularly beneficial on SMP boxes, as it 1472 * eliminates "false sharing". 1473 * Note for systems short on memory removing the alignment will 1474 * allow tighter packing of the smaller caches. 
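 *
 * As an illustrative example (size table assumed from kmalloc_sizes.h):
 * with the default table a kmalloc(100, GFP_KERNEL) is satisfied from
 * the "size-128" cache created here, and kmalloc(100, GFP_DMA) from the
 * matching "size-128(DMA)" cache created just below.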
1475 */ 1476 if (!sizes->cs_cachep) { 1477 sizes->cs_cachep = kmem_cache_create(names->name, 1478 sizes->cs_size, 1479 ARCH_KMALLOC_MINALIGN, 1480 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1481 NULL, NULL); 1482 } 1483 1484 sizes->cs_dmacachep = kmem_cache_create(names->name_dma, 1485 sizes->cs_size, 1486 ARCH_KMALLOC_MINALIGN, 1487 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1488 SLAB_PANIC, 1489 NULL, NULL); 1490 sizes++; 1491 names++; 1492 } 1493 /* 4) Replace the bootstrap head arrays */ 1494 { 1495 struct array_cache *ptr; 1496 1497 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1498 1499 local_irq_disable(); 1500 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); 1501 memcpy(ptr, cpu_cache_get(&cache_cache), 1502 sizeof(struct arraycache_init)); 1503 /* 1504 * Do not assume that spinlocks can be initialized via memcpy: 1505 */ 1506 spin_lock_init(&ptr->lock); 1507 1508 cache_cache.array[smp_processor_id()] = ptr; 1509 local_irq_enable(); 1510 1511 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1512 1513 local_irq_disable(); 1514 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) 1515 != &initarray_generic.cache); 1516 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), 1517 sizeof(struct arraycache_init)); 1518 /* 1519 * Do not assume that spinlocks can be initialized via memcpy: 1520 */ 1521 spin_lock_init(&ptr->lock); 1522 1523 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1524 ptr; 1525 local_irq_enable(); 1526 } 1527 /* 5) Replace the bootstrap kmem_list3's */ 1528 { 1529 int nid; 1530 1531 /* Replace the static kmem_list3 structures for the boot cpu */ 1532 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node); 1533 1534 for_each_online_node(nid) { 1535 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1536 &initkmem_list3[SIZE_AC + nid], nid); 1537 1538 if (INDEX_AC != INDEX_L3) { 1539 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1540 &initkmem_list3[SIZE_L3 + nid], nid); 1541 } 1542 } 1543 } 1544 1545 /* 6) resize the head arrays to their final sizes */ 1546 { 1547 struct kmem_cache *cachep; 1548 mutex_lock(&cache_chain_mutex); 1549 list_for_each_entry(cachep, &cache_chain, next) 1550 if (enable_cpucache(cachep)) 1551 BUG(); 1552 mutex_unlock(&cache_chain_mutex); 1553 } 1554 1555 /* Annotate slab for lockdep -- annotate the malloc caches */ 1556 init_lock_keys(); 1557 1558 1559 /* Done! */ 1560 g_cpucache_up = FULL; 1561 1562 /* 1563 * Register a cpu startup notifier callback that initializes 1564 * cpu_cache_get for all new cpus 1565 */ 1566 register_cpu_notifier(&cpucache_notifier); 1567 1568 /* 1569 * The reap timers are started later, with a module init call: That part 1570 * of the kernel is not yet operational. 1571 */ 1572} 1573 1574static int __init cpucache_init(void) 1575{ 1576 int cpu; 1577 1578 /* 1579 * Register the timers that return unneeded pages to the page allocator 1580 */ 1581 for_each_online_cpu(cpu) 1582 start_cpu_timer(cpu); 1583 return 0; 1584} 1585__initcall(cpucache_init); 1586 1587/* 1588 * Interface to system's page allocator. No need to hold the cache-lock. 1589 * 1590 * If we requested dmaable memory, we will get it. Even if we 1591 * did not request dmaable memory, we might get it, but that 1592 * would be relatively rare and ignorable. 
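 *
 * A minimal sketch of the expected use by a grow path (names and error
 * handling are illustrative; the real caller in this file is
 * cache_grow(), further down):
 *
 *	void *addr = kmem_getpages(cachep, flags, nodeid);
 *	if (!addr)
 *		goto failed;
 *	...
 *	kmem_freepages(cachep, addr);
 *
 * kmem_freepages() below hands the 2^gfporder pages back and undoes the
 * NR_SLAB_* accounting done here.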
1593 */ 1594static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) 1595{ 1596 struct page *page; 1597 int nr_pages; 1598 int i; 1599 1600#ifndef CONFIG_MMU 1601 /* 1602 * Nommu uses slab's for process anonymous memory allocations, and thus 1603 * requires __GFP_COMP to properly refcount higher order allocations 1604 */ 1605 flags |= __GFP_COMP; 1606#endif 1607 1608 /* 1609 * Under NUMA we want memory on the indicated node. We will handle 1610 * the needed fallback ourselves since we want to serve from our 1611 * per node object lists first for other nodes. 1612 */ 1613 flags |= cachep->gfpflags | GFP_THISNODE; 1614 1615 page = alloc_pages_node(nodeid, flags, cachep->gfporder); 1616 if (!page) 1617 return NULL; 1618 1619 nr_pages = (1 << cachep->gfporder); 1620 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1621 add_zone_page_state(page_zone(page), 1622 NR_SLAB_RECLAIMABLE, nr_pages); 1623 else 1624 add_zone_page_state(page_zone(page), 1625 NR_SLAB_UNRECLAIMABLE, nr_pages); 1626 for (i = 0; i < nr_pages; i++) 1627 __SetPageSlab(page + i); 1628 return page_address(page); 1629} 1630 1631/* 1632 * Interface to system's page release. 1633 */ 1634static void kmem_freepages(struct kmem_cache *cachep, void *addr) 1635{ 1636 unsigned long i = (1 << cachep->gfporder); 1637 struct page *page = virt_to_page(addr); 1638 const unsigned long nr_freed = i; 1639 1640 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1641 sub_zone_page_state(page_zone(page), 1642 NR_SLAB_RECLAIMABLE, nr_freed); 1643 else 1644 sub_zone_page_state(page_zone(page), 1645 NR_SLAB_UNRECLAIMABLE, nr_freed); 1646 while (i--) { 1647 BUG_ON(!PageSlab(page)); 1648 __ClearPageSlab(page); 1649 page++; 1650 } 1651 if (current->reclaim_state) 1652 current->reclaim_state->reclaimed_slab += nr_freed; 1653 free_pages((unsigned long)addr, cachep->gfporder); 1654} 1655 1656static void kmem_rcu_free(struct rcu_head *head) 1657{ 1658 struct slab_rcu *slab_rcu = (struct slab_rcu *)head; 1659 struct kmem_cache *cachep = slab_rcu->cachep; 1660 1661 kmem_freepages(cachep, slab_rcu->addr); 1662 if (OFF_SLAB(cachep)) 1663 kmem_cache_free(cachep->slabp_cache, slab_rcu); 1664} 1665 1666#if DEBUG 1667 1668#ifdef CONFIG_DEBUG_PAGEALLOC 1669static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, 1670 unsigned long caller) 1671{ 1672 int size = obj_size(cachep); 1673 1674 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; 1675 1676 if (size < 5 * sizeof(unsigned long)) 1677 return; 1678 1679 *addr++ = 0x12345678; 1680 *addr++ = caller; 1681 *addr++ = smp_processor_id(); 1682 size -= 3 * sizeof(unsigned long); 1683 { 1684 unsigned long *sptr = &caller; 1685 unsigned long svalue; 1686 1687 while (!kstack_end(sptr)) { 1688 svalue = *sptr++; 1689 if (kernel_text_address(svalue)) { 1690 *addr++ = svalue; 1691 size -= sizeof(unsigned long); 1692 if (size <= sizeof(unsigned long)) 1693 break; 1694 } 1695 } 1696 1697 } 1698 *addr++ = 0x87654321; 1699} 1700#endif 1701 1702static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1703{ 1704 int size = obj_size(cachep); 1705 addr = &((char *)addr)[obj_offset(cachep)]; 1706 1707 memset(addr, val, size); 1708 *(unsigned char *)(addr + size - 1) = POISON_END; 1709} 1710 1711static void dump_line(char *data, int offset, int limit) 1712{ 1713 int i; 1714 unsigned char error = 0; 1715 int bad_count = 0; 1716 1717 printk(KERN_ERR "%03x:", offset); 1718 for (i = 0; i < limit; i++) { 1719 if (data[offset + i] != POISON_FREE) { 1720 error = data[offset + i]; 1721 
bad_count++; 1722 } 1723 printk(" %02x", (unsigned char)data[offset + i]); 1724 } 1725 printk("\n"); 1726 1727 if (bad_count == 1) { 1728 error ^= POISON_FREE; 1729 if (!(error & (error - 1))) { 1730 printk(KERN_ERR "Single bit error detected. Probably " 1731 "bad RAM.\n"); 1732#ifdef CONFIG_X86 1733 printk(KERN_ERR "Run memtest86+ or a similar memory " 1734 "test tool.\n"); 1735#else 1736 printk(KERN_ERR "Run a memory test tool.\n"); 1737#endif 1738 } 1739 } 1740} 1741#endif 1742 1743#if DEBUG 1744 1745static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1746{ 1747 int i, size; 1748 char *realobj; 1749 1750 if (cachep->flags & SLAB_RED_ZONE) { 1751 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", 1752 *dbg_redzone1(cachep, objp), 1753 *dbg_redzone2(cachep, objp)); 1754 } 1755 1756 if (cachep->flags & SLAB_STORE_USER) { 1757 printk(KERN_ERR "Last user: [<%p>]", 1758 *dbg_userword(cachep, objp)); 1759 print_symbol("(%s)", 1760 (unsigned long)*dbg_userword(cachep, objp)); 1761 printk("\n"); 1762 } 1763 realobj = (char *)objp + obj_offset(cachep); 1764 size = obj_size(cachep); 1765 for (i = 0; i < size && lines; i += 16, lines--) { 1766 int limit; 1767 limit = 16; 1768 if (i + limit > size) 1769 limit = size - i; 1770 dump_line(realobj, i, limit); 1771 } 1772} 1773 1774static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1775{ 1776 char *realobj; 1777 int size, i; 1778 int lines = 0; 1779 1780 realobj = (char *)objp + obj_offset(cachep); 1781 size = obj_size(cachep); 1782 1783 for (i = 0; i < size; i++) { 1784 char exp = POISON_FREE; 1785 if (i == size - 1) 1786 exp = POISON_END; 1787 if (realobj[i] != exp) { 1788 int limit; 1789 /* Mismatch ! */ 1790 /* Print header */ 1791 if (lines == 0) { 1792 printk(KERN_ERR 1793 "Slab corruption: start=%p, len=%d\n", 1794 realobj, size); 1795 print_objinfo(cachep, objp, 0); 1796 } 1797 /* Hexdump the affected line */ 1798 i = (i / 16) * 16; 1799 limit = 16; 1800 if (i + limit > size) 1801 limit = size - i; 1802 dump_line(realobj, i, limit); 1803 i += 16; 1804 lines++; 1805 /* Limit to 5 lines */ 1806 if (lines > 5) 1807 break; 1808 } 1809 } 1810 if (lines != 0) { 1811 /* Print some data about the neighboring objects, if they 1812 * exist: 1813 */ 1814 struct slab *slabp = virt_to_slab(objp); 1815 unsigned int objnr; 1816 1817 objnr = obj_to_index(cachep, slabp, objp); 1818 if (objnr) { 1819 objp = index_to_obj(cachep, slabp, objnr - 1); 1820 realobj = (char *)objp + obj_offset(cachep); 1821 printk(KERN_ERR "Prev obj: start=%p, len=%d\n", 1822 realobj, size); 1823 print_objinfo(cachep, objp, 2); 1824 } 1825 if (objnr + 1 < cachep->num) { 1826 objp = index_to_obj(cachep, slabp, objnr + 1); 1827 realobj = (char *)objp + obj_offset(cachep); 1828 printk(KERN_ERR "Next obj: start=%p, len=%d\n", 1829 realobj, size); 1830 print_objinfo(cachep, objp, 2); 1831 } 1832 } 1833} 1834#endif 1835 1836#if DEBUG 1837/** 1838 * slab_destroy_objs - destroy a slab and its objects 1839 * @cachep: cache pointer being destroyed 1840 * @slabp: slab pointer being destroyed 1841 * 1842 * Call the registered destructor for each object in a slab that is being 1843 * destroyed. 
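 *
 * The destructor has the same (illustrative) prototype as the constructor,
 *
 *	void dtor(void *objp, struct kmem_cache *cachep, unsigned long flags);
 *
 * and is called below with flags == 0 and objp already adjusted past the
 * debugging fields (obj_offset()).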
1844 */ 1845static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1846{ 1847 int i; 1848 for (i = 0; i < cachep->num; i++) { 1849 void *objp = index_to_obj(cachep, slabp, i); 1850 1851 if (cachep->flags & SLAB_POISON) { 1852#ifdef CONFIG_DEBUG_PAGEALLOC 1853 if (cachep->buffer_size % PAGE_SIZE == 0 && 1854 OFF_SLAB(cachep)) 1855 kernel_map_pages(virt_to_page(objp), 1856 cachep->buffer_size / PAGE_SIZE, 1); 1857 else 1858 check_poison_obj(cachep, objp); 1859#else 1860 check_poison_obj(cachep, objp); 1861#endif 1862 } 1863 if (cachep->flags & SLAB_RED_ZONE) { 1864 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1865 slab_error(cachep, "start of a freed object " 1866 "was overwritten"); 1867 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1868 slab_error(cachep, "end of a freed object " 1869 "was overwritten"); 1870 } 1871 if (cachep->dtor && !(cachep->flags & SLAB_POISON)) 1872 (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); 1873 } 1874} 1875#else 1876static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1877{ 1878 if (cachep->dtor) { 1879 int i; 1880 for (i = 0; i < cachep->num; i++) { 1881 void *objp = index_to_obj(cachep, slabp, i); 1882 (cachep->dtor) (objp, cachep, 0); 1883 } 1884 } 1885} 1886#endif 1887 1888/** 1889 * slab_destroy - destroy and release all objects in a slab 1890 * @cachep: cache pointer being destroyed 1891 * @slabp: slab pointer being destroyed 1892 * 1893 * Destroy all the objs in a slab, and release the mem back to the system. 1894 * Before calling the slab must have been unlinked from the cache. The 1895 * cache-lock is not held/needed. 1896 */ 1897static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) 1898{ 1899 void *addr = slabp->s_mem - slabp->colouroff; 1900 1901 slab_destroy_objs(cachep, slabp); 1902 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1903 struct slab_rcu *slab_rcu; 1904 1905 slab_rcu = (struct slab_rcu *)slabp; 1906 slab_rcu->cachep = cachep; 1907 slab_rcu->addr = addr; 1908 call_rcu(&slab_rcu->head, kmem_rcu_free); 1909 } else { 1910 kmem_freepages(cachep, addr); 1911 if (OFF_SLAB(cachep)) 1912 kmem_cache_free(cachep->slabp_cache, slabp); 1913 } 1914} 1915 1916/* 1917 * For setting up all the kmem_list3s for cache whose buffer_size is same as 1918 * size of kmem_list3. 1919 */ 1920static void set_up_list3s(struct kmem_cache *cachep, int index) 1921{ 1922 int node; 1923 1924 for_each_online_node(node) { 1925 cachep->nodelists[node] = &initkmem_list3[index + node]; 1926 cachep->nodelists[node]->next_reap = jiffies + 1927 REAPTIMEOUT_LIST3 + 1928 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1929 } 1930} 1931 1932static void __kmem_cache_destroy(struct kmem_cache *cachep) 1933{ 1934 int i; 1935 struct kmem_list3 *l3; 1936 1937 for_each_online_cpu(i) 1938 kfree(cachep->array[i]); 1939 1940 /* NUMA: free the list3 structures */ 1941 for_each_online_node(i) { 1942 l3 = cachep->nodelists[i]; 1943 if (l3) { 1944 kfree(l3->shared); 1945 free_alien_cache(l3->alien); 1946 kfree(l3); 1947 } 1948 } 1949 kmem_cache_free(&cache_cache, cachep); 1950} 1951 1952 1953/** 1954 * calculate_slab_order - calculate size (page order) of slabs 1955 * @cachep: pointer to the cache that is being created 1956 * @size: size of objects to be created in this cache. 1957 * @align: required alignment for the objects. 1958 * @flags: slab allocation flags 1959 * 1960 * Also calculates the number of objects per slab. 1961 * 1962 * This could be made much more intelligent. 
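 *
 * A worked example of what the search below settles on, with assumed
 * numbers (4096-byte pages, ~1500-byte objects, on-slab management):
 * at order 0 only two objects fit and roughly 1KB is left over, which
 * fails the "left_over * 8 <= slab_size" fragmentation check, so the
 * loop tries order 1, where five objects fit with only ~600 bytes left
 * over and that order is kept.
 *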
For now, try to avoid using 1963 * high order pages for slabs. When the gfp() functions are more friendly 1964 * towards high-order requests, this should be changed. 1965 */ 1966static size_t calculate_slab_order(struct kmem_cache *cachep, 1967 size_t size, size_t align, unsigned long flags) 1968{ 1969 unsigned long offslab_limit; 1970 size_t left_over = 0; 1971 int gfporder; 1972 1973 for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { 1974 unsigned int num; 1975 size_t remainder; 1976 1977 cache_estimate(gfporder, size, align, flags, &remainder, &num); 1978 if (!num) 1979 continue; 1980 1981 if (flags & CFLGS_OFF_SLAB) { 1982 /* 1983 * Max number of objs-per-slab for caches which 1984 * use off-slab slabs. Needed to avoid a possible 1985 * looping condition in cache_grow(). 1986 */ 1987 offslab_limit = size - sizeof(struct slab); 1988 offslab_limit /= sizeof(kmem_bufctl_t); 1989 1990 if (num > offslab_limit) 1991 break; 1992 } 1993 1994 /* Found something acceptable - save it away */ 1995 cachep->num = num; 1996 cachep->gfporder = gfporder; 1997 left_over = remainder; 1998 1999 /* 2000 * A VFS-reclaimable slab tends to have most allocations 2001 * as GFP_NOFS and we really don't want to have to be allocating 2002 * higher-order pages when we are unable to shrink dcache. 2003 */ 2004 if (flags & SLAB_RECLAIM_ACCOUNT) 2005 break; 2006 2007 /* 2008 * Large number of objects is good, but very large slabs are 2009 * currently bad for the gfp()s. 2010 */ 2011 if (gfporder >= slab_break_gfp_order) 2012 break; 2013 2014 /* 2015 * Acceptable internal fragmentation? 2016 */ 2017 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 2018 break; 2019 } 2020 return left_over; 2021} 2022 2023static int setup_cpu_cache(struct kmem_cache *cachep) 2024{ 2025 if (g_cpucache_up == FULL) 2026 return enable_cpucache(cachep); 2027 2028 if (g_cpucache_up == NONE) { 2029 /* 2030 * Note: the first kmem_cache_create must create the cache 2031 * that's used by kmalloc(24), otherwise the creation of 2032 * further caches will BUG(). 2033 */ 2034 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2035 2036 /* 2037 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is 2038 * the first cache, then we need to set up all its list3s, 2039 * otherwise the creation of further caches will BUG(). 2040 */ 2041 set_up_list3s(cachep, SIZE_AC); 2042 if (INDEX_AC == INDEX_L3) 2043 g_cpucache_up = PARTIAL_L3; 2044 else 2045 g_cpucache_up = PARTIAL_AC; 2046 } else { 2047 cachep->array[smp_processor_id()] = 2048 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 2049 2050 if (g_cpucache_up == PARTIAL_AC) { 2051 set_up_list3s(cachep, SIZE_L3); 2052 g_cpucache_up = PARTIAL_L3; 2053 } else { 2054 int node; 2055 for_each_online_node(node) { 2056 cachep->nodelists[node] = 2057 kmalloc_node(sizeof(struct kmem_list3), 2058 GFP_KERNEL, node); 2059 BUG_ON(!cachep->nodelists[node]); 2060 kmem_list3_init(cachep->nodelists[node]); 2061 } 2062 } 2063 } 2064 cachep->nodelists[numa_node_id()]->next_reap = 2065 jiffies + REAPTIMEOUT_LIST3 + 2066 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2067 2068 cpu_cache_get(cachep)->avail = 0; 2069 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2070 cpu_cache_get(cachep)->batchcount = 1; 2071 cpu_cache_get(cachep)->touched = 0; 2072 cachep->batchcount = 1; 2073 cachep->limit = BOOT_CPUCACHE_ENTRIES; 2074 return 0; 2075} 2076 2077/** 2078 * kmem_cache_create - Create a cache. 2079 * @name: A string which is used in /proc/slabinfo to identify this cache. 
2080 * @size: The size of objects to be created in this cache. 2081 * @align: The required alignment for the objects. 2082 * @flags: SLAB flags 2083 * @ctor: A constructor for the objects. 2084 * @dtor: A destructor for the objects. 2085 * 2086 * Returns a ptr to the cache on success, NULL on failure. 2087 * Cannot be called within an interrupt, but can be interrupted. 2088 * The @ctor is run when new pages are allocated by the cache 2089 * and the @dtor is run before the pages are handed back. 2090 * 2091 * @name must be valid until the cache is destroyed. This implies that 2092 * the module calling this has to destroy the cache before getting unloaded. 2093 * 2094 * The flags are 2095 * 2096 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 2097 * to catch references to uninitialised memory. 2098 * 2099 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 2100 * for buffer overruns. 2101 * 2102 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 2103 * cacheline. This can be beneficial if you're counting cycles as closely 2104 * as davem. 2105 */ 2106struct kmem_cache * 2107kmem_cache_create (const char *name, size_t size, size_t align, 2108 unsigned long flags, 2109 void (*ctor)(void*, struct kmem_cache *, unsigned long), 2110 void (*dtor)(void*, struct kmem_cache *, unsigned long)) 2111{ 2112 size_t left_over, slab_size, ralign; 2113 struct kmem_cache *cachep = NULL, *pc; 2114 2115 /* 2116 * Sanity checks... these are all serious usage bugs. 2117 */ 2118 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2119 (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { 2120 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2121 name); 2122 BUG(); 2123 } 2124 2125 /* 2126 * We use cache_chain_mutex to ensure a consistent view of 2127 * cpu_online_map as well. Please see cpuup_callback 2128 */ 2129 mutex_lock(&cache_chain_mutex); 2130 2131 list_for_each_entry(pc, &cache_chain, next) { 2132 mm_segment_t old_fs = get_fs(); 2133 char tmp; 2134 int res; 2135 2136 /* 2137 * This happens when the module gets unloaded and doesn't 2138 * destroy its slab cache and no-one else reuses the vmalloc 2139 * area of the module. Print a warning. 2140 */ 2141 set_fs(KERNEL_DS); 2142 res = __get_user(tmp, pc->name); 2143 set_fs(old_fs); 2144 if (res) { 2145 printk("SLAB: cache with size %d has lost its name\n", 2146 pc->buffer_size); 2147 continue; 2148 } 2149 2150 if (!strcmp(pc->name, name)) { 2151 printk("kmem_cache_create: duplicate cache %s\n", name); 2152 dump_stack(); 2153 goto oops; 2154 } 2155 } 2156 2157#if DEBUG 2158 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 2159 if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { 2160 /* No constructor, but initial state check requested */ 2161 printk(KERN_ERR "%s: No con, but init state check " 2162 "requested - %s\n", __FUNCTION__, name); 2163 flags &= ~SLAB_DEBUG_INITIAL; 2164 } 2165#if FORCED_DEBUG 2166 /* 2167 * Enable redzoning and last user accounting, except for caches with 2168 * large objects, if the increased size would increase the object size 2169 * above the next power of two: caches with object sizes just above a 2170 * power of two have a significant amount of internal fragmentation.
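 *
 * A worked example, assuming 8-byte words (so the debug overhead added
 * below is 3 * BYTES_PER_WORD == 24 bytes): a 5000-byte object plus 24
 * bytes still stays below 8192, so redzoning and user tracking are
 * forced; an 8192-byte object would be pushed past 8192 and is left
 * alone. Objects smaller than 4096 bytes always get the extra debugging.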
2171 */ 2172 if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD)) 2173 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2174 if (!(flags & SLAB_DESTROY_BY_RCU)) 2175 flags |= SLAB_POISON; 2176#endif 2177 if (flags & SLAB_DESTROY_BY_RCU) 2178 BUG_ON(flags & SLAB_POISON); 2179#endif 2180 if (flags & SLAB_DESTROY_BY_RCU) 2181 BUG_ON(dtor); 2182 2183 /* 2184 * Always checks flags, a caller might be expecting debug support which 2185 * isn't available. 2186 */ 2187 BUG_ON(flags & ~CREATE_MASK); 2188 2189 /* 2190 * Check that size is in terms of words. This is needed to avoid 2191 * unaligned accesses for some archs when redzoning is used, and makes 2192 * sure any on-slab bufctl's are also correctly aligned. 2193 */ 2194 if (size & (BYTES_PER_WORD - 1)) { 2195 size += (BYTES_PER_WORD - 1); 2196 size &= ~(BYTES_PER_WORD - 1); 2197 } 2198 2199 /* calculate the final buffer alignment: */ 2200 2201 /* 1) arch recommendation: can be overridden for debug */ 2202 if (flags & SLAB_HWCACHE_ALIGN) { 2203 /* 2204 * Default alignment: as specified by the arch code. Except if 2205 * an object is really small, then squeeze multiple objects into 2206 * one cacheline. 2207 */ 2208 ralign = cache_line_size(); 2209 while (size <= ralign / 2) 2210 ralign /= 2; 2211 } else { 2212 ralign = BYTES_PER_WORD; 2213 } 2214 2215 /* 2216 * Redzoning and user store require word alignment. Note this will be 2217 * overridden by architecture or caller mandated alignment if either 2218 * is greater than BYTES_PER_WORD. 2219 */ 2220 if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER) 2221 ralign = BYTES_PER_WORD; 2222 2223 /* 2) arch mandated alignment */ 2224 if (ralign < ARCH_SLAB_MINALIGN) { 2225 ralign = ARCH_SLAB_MINALIGN; 2226 } 2227 /* 3) caller mandated alignment */ 2228 if (ralign < align) { 2229 ralign = align; 2230 } 2231 /* disable debug if necessary */ 2232 if (ralign > BYTES_PER_WORD) 2233 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2234 /* 2235 * 4) Store it. 2236 */ 2237 align = ralign; 2238 2239 /* Get cache's description obj. */ 2240 cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL); 2241 if (!cachep) 2242 goto oops; 2243 2244#if DEBUG 2245 cachep->obj_size = size; 2246 2247 /* 2248 * Both debugging options require word-alignment which is calculated 2249 * into align above. 2250 */ 2251 if (flags & SLAB_RED_ZONE) { 2252 /* add space for red zone words */ 2253 cachep->obj_offset += BYTES_PER_WORD; 2254 size += 2 * BYTES_PER_WORD; 2255 } 2256 if (flags & SLAB_STORE_USER) { 2257 /* user store requires one word storage behind the end of 2258 * the real object. 2259 */ 2260 size += BYTES_PER_WORD; 2261 } 2262#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2263 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2264 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { 2265 cachep->obj_offset += PAGE_SIZE - size; 2266 size = PAGE_SIZE; 2267 } 2268#endif 2269#endif 2270 2271 /* 2272 * Determine if the slab management is 'on' or 'off' slab. 2273 * (bootstrapping cannot cope with offslab caches so don't do 2274 * it too early on.) 2275 */ 2276 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) 2277 /* 2278 * Size is large, assume best to place the slab management obj 2279 * off-slab (should allow better packing of objs). 
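 * With the common 4K page size this means the slab management moves
 * off-slab once the (possibly debug-padded) object size reaches 512
 * bytes (PAGE_SIZE >> 3); smaller objects keep the struct slab and its
 * bufctl array inside the slab itself.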
2280 */ 2281 flags |= CFLGS_OFF_SLAB; 2282 2283 size = ALIGN(size, align); 2284 2285 left_over = calculate_slab_order(cachep, size, align, flags); 2286 2287 if (!cachep->num) { 2288 printk("kmem_cache_create: couldn't create cache %s.\n", name); 2289 kmem_cache_free(&cache_cache, cachep); 2290 cachep = NULL; 2291 goto oops; 2292 } 2293 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2294 + sizeof(struct slab), align); 2295 2296 /* 2297 * If the slab has been placed off-slab, and we have enough space then 2298 * move it on-slab. This is at the expense of any extra colouring. 2299 */ 2300 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2301 flags &= ~CFLGS_OFF_SLAB; 2302 left_over -= slab_size; 2303 } 2304 2305 if (flags & CFLGS_OFF_SLAB) { 2306 /* really off slab. No need for manual alignment */ 2307 slab_size = 2308 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); 2309 } 2310 2311 cachep->colour_off = cache_line_size(); 2312 /* Offset must be a multiple of the alignment. */ 2313 if (cachep->colour_off < align) 2314 cachep->colour_off = align; 2315 cachep->colour = left_over / cachep->colour_off; 2316 cachep->slab_size = slab_size; 2317 cachep->flags = flags; 2318 cachep->gfpflags = 0; 2319 if (flags & SLAB_CACHE_DMA) 2320 cachep->gfpflags |= GFP_DMA; 2321 cachep->buffer_size = size; 2322 2323 if (flags & CFLGS_OFF_SLAB) { 2324 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2325 /* 2326 * This is a possibility for one of the malloc_sizes caches. 2327 * But since we go off slab only for object size greater than 2328 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, 2329 * this should not happen at all. 2330 * But leave a BUG_ON for some lucky dude. 2331 */ 2332 BUG_ON(!cachep->slabp_cache); 2333 } 2334 cachep->ctor = ctor; 2335 cachep->dtor = dtor; 2336 cachep->name = name; 2337 2338 if (setup_cpu_cache(cachep)) { 2339 __kmem_cache_destroy(cachep); 2340 cachep = NULL; 2341 goto oops; 2342 } 2343 2344 /* cache setup completed, link it into the list */ 2345 list_add(&cachep->next, &cache_chain); 2346oops: 2347 if (!cachep && (flags & SLAB_PANIC)) 2348 panic("kmem_cache_create(): failed to create slab `%s'\n", 2349 name); 2350 mutex_unlock(&cache_chain_mutex); 2351 return cachep; 2352} 2353EXPORT_SYMBOL(kmem_cache_create); 2354 2355#if DEBUG 2356static void check_irq_off(void) 2357{ 2358 BUG_ON(!irqs_disabled()); 2359} 2360 2361static void check_irq_on(void) 2362{ 2363 BUG_ON(irqs_disabled()); 2364} 2365 2366static void check_spinlock_acquired(struct kmem_cache *cachep) 2367{ 2368#ifdef CONFIG_SMP 2369 check_irq_off(); 2370 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2371#endif 2372} 2373 2374static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2375{ 2376#ifdef CONFIG_SMP 2377 check_irq_off(); 2378 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2379#endif 2380} 2381 2382#else 2383#define check_irq_off() do { } while(0) 2384#define check_irq_on() do { } while(0) 2385#define check_spinlock_acquired(x) do { } while(0) 2386#define check_spinlock_acquired_node(x, y) do { } while(0) 2387#endif 2388 2389static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2390 struct array_cache *ac, 2391 int force, int node); 2392 2393static void do_drain(void *arg) 2394{ 2395 struct kmem_cache *cachep = arg; 2396 struct array_cache *ac; 2397 int node = numa_node_id(); 2398 2399 check_irq_off(); 2400 ac = cpu_cache_get(cachep); 2401 spin_lock(&cachep->nodelists[node]->list_lock); 2402 
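	/* Hand every object cached on this cpu back to the slab lists. */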
free_block(cachep, ac->entry, ac->avail, node); 2403 spin_unlock(&cachep->nodelists[node]->list_lock); 2404 ac->avail = 0; 2405} 2406 2407static void drain_cpu_caches(struct kmem_cache *cachep) 2408{ 2409 struct kmem_list3 *l3; 2410 int node; 2411 2412 on_each_cpu(do_drain, cachep, 1, 1); 2413 check_irq_on(); 2414 for_each_online_node(node) { 2415 l3 = cachep->nodelists[node]; 2416 if (l3 && l3->alien) 2417 drain_alien_cache(cachep, l3->alien); 2418 } 2419 2420 for_each_online_node(node) { 2421 l3 = cachep->nodelists[node]; 2422 if (l3) 2423 drain_array(cachep, l3, l3->shared, 1, node); 2424 } 2425} 2426 2427/* 2428 * Remove slabs from the list of free slabs. 2429 * Specify the number of slabs to drain in tofree. 2430 * 2431 * Returns the actual number of slabs released. 2432 */ 2433static int drain_freelist(struct kmem_cache *cache, 2434 struct kmem_list3 *l3, int tofree) 2435{ 2436 struct list_head *p; 2437 int nr_freed; 2438 struct slab *slabp; 2439 2440 nr_freed = 0; 2441 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { 2442 2443 spin_lock_irq(&l3->list_lock); 2444 p = l3->slabs_free.prev; 2445 if (p == &l3->slabs_free) { 2446 spin_unlock_irq(&l3->list_lock); 2447 goto out; 2448 } 2449 2450 slabp = list_entry(p, struct slab, list); 2451#if DEBUG 2452 BUG_ON(slabp->inuse); 2453#endif 2454 list_del(&slabp->list); 2455 /* 2456 * Safe to drop the lock. The slab is no longer linked 2457 * to the cache. 2458 */ 2459 l3->free_objects -= cache->num; 2460 spin_unlock_irq(&l3->list_lock); 2461 slab_destroy(cache, slabp); 2462 nr_freed++; 2463 } 2464out: 2465 return nr_freed; 2466} 2467 2468/* Called with cache_chain_mutex held to protect against cpu hotplug */ 2469static int __cache_shrink(struct kmem_cache *cachep) 2470{ 2471 int ret = 0, i = 0; 2472 struct kmem_list3 *l3; 2473 2474 drain_cpu_caches(cachep); 2475 2476 check_irq_on(); 2477 for_each_online_node(i) { 2478 l3 = cachep->nodelists[i]; 2479 if (!l3) 2480 continue; 2481 2482 drain_freelist(cachep, l3, l3->free_objects); 2483 2484 ret += !list_empty(&l3->slabs_full) || 2485 !list_empty(&l3->slabs_partial); 2486 } 2487 return (ret ? 1 : 0); 2488} 2489 2490/** 2491 * kmem_cache_shrink - Shrink a cache. 2492 * @cachep: The cache to shrink. 2493 * 2494 * Releases as many slabs as possible for a cache. 2495 * To help debugging, a zero exit status indicates all slabs were released. 2496 */ 2497int kmem_cache_shrink(struct kmem_cache *cachep) 2498{ 2499 int ret; 2500 BUG_ON(!cachep || in_interrupt()); 2501 2502 mutex_lock(&cache_chain_mutex); 2503 ret = __cache_shrink(cachep); 2504 mutex_unlock(&cache_chain_mutex); 2505 return ret; 2506} 2507EXPORT_SYMBOL(kmem_cache_shrink); 2508 2509/** 2510 * kmem_cache_destroy - delete a cache 2511 * @cachep: the cache to destroy 2512 * 2513 * Remove a struct kmem_cache object from the slab cache. 2514 * 2515 * It is expected this function will be called by a module when it is 2516 * unloaded. This will remove the cache completely, and avoid a duplicate 2517 * cache being allocated each time a module is loaded and unloaded, if the 2518 * module doesn't have persistent in-kernel storage across loads and unloads. 2519 * 2520 * The cache must be empty before calling this function. 2521 * 2522 * The caller must guarantee that noone will allocate memory from the cache 2523 * during the kmem_cache_destroy(). 2524 */ 2525void kmem_cache_destroy(struct kmem_cache *cachep) 2526{ 2527 BUG_ON(!cachep || in_interrupt()); 2528 2529 /* Find the cache in the chain of caches. 
*/ 2530 mutex_lock(&cache_chain_mutex); 2531 /* 2532 * the chain is never empty, cache_cache is never destroyed 2533 */ 2534 list_del(&cachep->next); 2535 if (__cache_shrink(cachep)) { 2536 slab_error(cachep, "Can't free all objects"); 2537 list_add(&cachep->next, &cache_chain); 2538 mutex_unlock(&cache_chain_mutex); 2539 return; 2540 } 2541 2542 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2543 synchronize_rcu(); 2544 2545 __kmem_cache_destroy(cachep); 2546 mutex_unlock(&cache_chain_mutex); 2547} 2548EXPORT_SYMBOL(kmem_cache_destroy); 2549 2550/* 2551 * Get the memory for a slab management obj. 2552 * For a slab cache when the slab descriptor is off-slab, slab descriptors 2553 * always come from malloc_sizes caches. The slab descriptor cannot 2554 * come from the same cache which is getting created because, 2555 * when we are searching for an appropriate cache for these 2556 * descriptors in kmem_cache_create, we search through the malloc_sizes array. 2557 * If we are creating a malloc_sizes cache here it would not be visible to 2558 * kmem_find_general_cachep till the initialization is complete. 2559 * Hence we cannot have slabp_cache same as the original cache. 2560 */ 2561static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, 2562 int colour_off, gfp_t local_flags, 2563 int nodeid) 2564{ 2565 struct slab *slabp; 2566 2567 if (OFF_SLAB(cachep)) { 2568 /* Slab management obj is off-slab. */ 2569 slabp = kmem_cache_alloc_node(cachep->slabp_cache, 2570 local_flags, nodeid); 2571 if (!slabp) 2572 return NULL; 2573 } else { 2574 slabp = objp + colour_off; 2575 colour_off += cachep->slab_size; 2576 } 2577 slabp->inuse = 0; 2578 slabp->colouroff = colour_off; 2579 slabp->s_mem = objp + colour_off; 2580 slabp->nodeid = nodeid; 2581 return slabp; 2582} 2583 2584static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2585{ 2586 return (kmem_bufctl_t *) (slabp + 1); 2587} 2588 2589static void cache_init_objs(struct kmem_cache *cachep, 2590 struct slab *slabp, unsigned long ctor_flags) 2591{ 2592 int i; 2593 2594 for (i = 0; i < cachep->num; i++) { 2595 void *objp = index_to_obj(cachep, slabp, i); 2596#if DEBUG 2597 /* need to poison the objs? */ 2598 if (cachep->flags & SLAB_POISON) 2599 poison_obj(cachep, objp, POISON_FREE); 2600 if (cachep->flags & SLAB_STORE_USER) 2601 *dbg_userword(cachep, objp) = NULL; 2602 2603 if (cachep->flags & SLAB_RED_ZONE) { 2604 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2605 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2606 } 2607 /* 2608 * Constructors are not allowed to allocate memory from the same 2609 * cache which they are a constructor for. Otherwise, deadlock. 2610 * They must also be threaded. 
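 * The ctor_flags passed in here may include SLAB_CTOR_ATOMIC when the
 * slab is grown from a non-sleeping allocation (see cache_grow()), so a
 * constructor must not sleep in that case.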
2611 */ 2612 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2613 cachep->ctor(objp + obj_offset(cachep), cachep, 2614 ctor_flags); 2615 2616 if (cachep->flags & SLAB_RED_ZONE) { 2617 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2618 slab_error(cachep, "constructor overwrote the" 2619 " end of an object"); 2620 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2621 slab_error(cachep, "constructor overwrote the" 2622 " start of an object"); 2623 } 2624 if ((cachep->buffer_size % PAGE_SIZE) == 0 && 2625 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2626 kernel_map_pages(virt_to_page(objp), 2627 cachep->buffer_size / PAGE_SIZE, 0); 2628#else 2629 if (cachep->ctor) 2630 cachep->ctor(objp, cachep, ctor_flags); 2631#endif 2632 slab_bufctl(slabp)[i] = i + 1; 2633 } 2634 slab_bufctl(slabp)[i - 1] = BUFCTL_END; 2635 slabp->free = 0; 2636} 2637 2638static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2639{ 2640 if (flags & SLAB_DMA) 2641 BUG_ON(!(cachep->gfpflags & GFP_DMA)); 2642 else 2643 BUG_ON(cachep->gfpflags & GFP_DMA); 2644} 2645 2646static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2647 int nodeid) 2648{ 2649 void *objp = index_to_obj(cachep, slabp, slabp->free); 2650 kmem_bufctl_t next; 2651 2652 slabp->inuse++; 2653 next = slab_bufctl(slabp)[slabp->free]; 2654#if DEBUG 2655 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2656 WARN_ON(slabp->nodeid != nodeid); 2657#endif 2658 slabp->free = next; 2659 2660 return objp; 2661} 2662 2663static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2664 void *objp, int nodeid) 2665{ 2666 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2667 2668#if DEBUG 2669 /* Verify that the slab belongs to the intended node */ 2670 WARN_ON(slabp->nodeid != nodeid); 2671 2672 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2673 printk(KERN_ERR "slab: double free detected in cache " 2674 "'%s', objp %p\n", cachep->name, objp); 2675 BUG(); 2676 } 2677#endif 2678 slab_bufctl(slabp)[objnr] = slabp->free; 2679 slabp->free = objnr; 2680 slabp->inuse--; 2681} 2682 2683/* 2684 * Map pages beginning at addr to the given cache and slab. This is required 2685 * for the slab allocator to be able to lookup the cache and slab of a 2686 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 2687 */ 2688static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2689 void *addr) 2690{ 2691 int nr_pages; 2692 struct page *page; 2693 2694 page = virt_to_page(addr); 2695 2696 nr_pages = 1; 2697 if (likely(!PageCompound(page))) 2698 nr_pages <<= cache->gfporder; 2699 2700 do { 2701 page_set_cache(page, cache); 2702 page_set_slab(page, slab); 2703 page++; 2704 } while (--nr_pages); 2705} 2706 2707/* 2708 * Grow (by 1) the number of slabs within a cache. This is called by 2709 * kmem_cache_alloc() when there are no active objs left in a cache. 2710 */ 2711static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) 2712{ 2713 struct slab *slabp; 2714 void *objp; 2715 size_t offset; 2716 gfp_t local_flags; 2717 unsigned long ctor_flags; 2718 struct kmem_list3 *l3; 2719 2720 /* 2721 * Be lazy and only check for valid flags here, keeping it out of the 2722 * critical path in kmem_cache_alloc(). 
2723 */ 2724 BUG_ON(flags & ~(SLAB_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW)); 2725 if (flags & __GFP_NO_GROW) 2726 return 0; 2727 2728 ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2729 local_flags = (flags & GFP_LEVEL_MASK); 2730 if (!(local_flags & __GFP_WAIT)) 2731 /* 2732 * Not allowed to sleep. Need to tell a constructor about 2733 * this - it might need to know... 2734 */ 2735 ctor_flags |= SLAB_CTOR_ATOMIC; 2736 2737 /* Take the l3 list lock to change the colour_next on this node */ 2738 check_irq_off(); 2739 l3 = cachep->nodelists[nodeid]; 2740 spin_lock(&l3->list_lock); 2741 2742 /* Get colour for the slab, and cal the next value. */ 2743 offset = l3->colour_next; 2744 l3->colour_next++; 2745 if (l3->colour_next >= cachep->colour) 2746 l3->colour_next = 0; 2747 spin_unlock(&l3->list_lock); 2748 2749 offset *= cachep->colour_off; 2750 2751 if (local_flags & __GFP_WAIT) 2752 local_irq_enable(); 2753 2754 /* 2755 * The test for missing atomic flag is performed here, rather than 2756 * the more obvious place, simply to reduce the critical path length 2757 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2758 * will eventually be caught here (where it matters). 2759 */ 2760 kmem_flagcheck(cachep, flags); 2761 2762 /* 2763 * Get mem for the objs. Attempt to allocate a physical page from 2764 * 'nodeid'. 2765 */ 2766 objp = kmem_getpages(cachep, flags, nodeid); 2767 if (!objp) 2768 goto failed; 2769 2770 /* Get slab management. */ 2771 slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid); 2772 if (!slabp) 2773 goto opps1; 2774 2775 slabp->nodeid = nodeid; 2776 slab_map_pages(cachep, slabp, objp); 2777 2778 cache_init_objs(cachep, slabp, ctor_flags); 2779 2780 if (local_flags & __GFP_WAIT) 2781 local_irq_disable(); 2782 check_irq_off(); 2783 spin_lock(&l3->list_lock); 2784 2785 /* Make slab active. */ 2786 list_add_tail(&slabp->list, &(l3->slabs_free)); 2787 STATS_INC_GROWN(cachep); 2788 l3->free_objects += cachep->num; 2789 spin_unlock(&l3->list_lock); 2790 return 1; 2791opps1: 2792 kmem_freepages(cachep, objp); 2793failed: 2794 if (local_flags & __GFP_WAIT) 2795 local_irq_disable(); 2796 return 0; 2797} 2798 2799#if DEBUG 2800 2801/* 2802 * Perform extra freeing checks: 2803 * - detect bad pointers. 2804 * - POISON/RED_ZONE checking 2805 * - destructor calls, for caches with POISON+dtor 2806 */ 2807static void kfree_debugcheck(const void *objp) 2808{ 2809 struct page *page; 2810 2811 if (!virt_addr_valid(objp)) { 2812 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2813 (unsigned long)objp); 2814 BUG(); 2815 } 2816 page = virt_to_page(objp); 2817 if (!PageSlab(page)) { 2818 printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", 2819 (unsigned long)objp); 2820 BUG(); 2821 } 2822} 2823 2824static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2825{ 2826 unsigned long redzone1, redzone2; 2827 2828 redzone1 = *dbg_redzone1(cache, obj); 2829 redzone2 = *dbg_redzone2(cache, obj); 2830 2831 /* 2832 * Redzone is ok. 
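 * Both markers still RED_ACTIVE means the object was live and its bounds
 * were never written over; both RED_INACTIVE at free time means it was
 * already freed (a double free); any other combination means memory
 * outside the object was overwritten.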
2833 */ 2834 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2835 return; 2836 2837 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2838 slab_error(cache, "double free detected"); 2839 else 2840 slab_error(cache, "memory outside object was overwritten"); 2841 2842 printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n", 2843 obj, redzone1, redzone2); 2844} 2845 2846static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2847 void *caller) 2848{ 2849 struct page *page; 2850 unsigned int objnr; 2851 struct slab *slabp; 2852 2853 objp -= obj_offset(cachep); 2854 kfree_debugcheck(objp); 2855 page = virt_to_page(objp); 2856 2857 slabp = page_get_slab(page); 2858 2859 if (cachep->flags & SLAB_RED_ZONE) { 2860 verify_redzone_free(cachep, objp); 2861 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2862 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2863 } 2864 if (cachep->flags & SLAB_STORE_USER) 2865 *dbg_userword(cachep, objp) = caller; 2866 2867 objnr = obj_to_index(cachep, slabp, objp); 2868 2869 BUG_ON(objnr >= cachep->num); 2870 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2871 2872 if (cachep->flags & SLAB_DEBUG_INITIAL) { 2873 /* 2874 * Need to call the slab's constructor so the caller can 2875 * perform a verify of its state (debugging). Called without 2876 * the cache-lock held. 2877 */ 2878 cachep->ctor(objp + obj_offset(cachep), 2879 cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY); 2880 } 2881 if (cachep->flags & SLAB_POISON && cachep->dtor) { 2882 /* we want to cache poison the object, 2883 * call the destruction callback 2884 */ 2885 cachep->dtor(objp + obj_offset(cachep), cachep, 0); 2886 } 2887#ifdef CONFIG_DEBUG_SLAB_LEAK 2888 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2889#endif 2890 if (cachep->flags & SLAB_POISON) { 2891#ifdef CONFIG_DEBUG_PAGEALLOC 2892 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2893 store_stackinfo(cachep, objp, (unsigned long)caller); 2894 kernel_map_pages(virt_to_page(objp), 2895 cachep->buffer_size / PAGE_SIZE, 0); 2896 } else { 2897 poison_obj(cachep, objp, POISON_FREE); 2898 } 2899#else 2900 poison_obj(cachep, objp, POISON_FREE); 2901#endif 2902 } 2903 return objp; 2904} 2905 2906static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) 2907{ 2908 kmem_bufctl_t i; 2909 int entries = 0; 2910 2911 /* Check slab's freelist to see if this obj is there. */ 2912 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2913 entries++; 2914 if (entries > cachep->num || i >= cachep->num) 2915 goto bad; 2916 } 2917 if (entries != cachep->num - slabp->inuse) { 2918bad: 2919 printk(KERN_ERR "slab: Internal list corruption detected in " 2920 "cache '%s'(%d), slabp %p(%d). 
Hexdump:\n", 2921 cachep->name, cachep->num, slabp, slabp->inuse); 2922 for (i = 0; 2923 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); 2924 i++) { 2925 if (i % 16 == 0) 2926 printk("\n%03x:", i); 2927 printk(" %02x", ((unsigned char *)slabp)[i]); 2928 } 2929 printk("\n"); 2930 BUG(); 2931 } 2932} 2933#else 2934#define kfree_debugcheck(x) do { } while(0) 2935#define cache_free_debugcheck(x,objp,z) (objp) 2936#define check_slabp(x,y) do { } while(0) 2937#endif 2938 2939static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2940{ 2941 int batchcount; 2942 struct kmem_list3 *l3; 2943 struct array_cache *ac; 2944 int node; 2945 2946 node = numa_node_id(); 2947 2948 check_irq_off(); 2949 ac = cpu_cache_get(cachep); 2950retry: 2951 batchcount = ac->batchcount; 2952 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2953 /* 2954 * If there was little recent activity on this cache, then 2955 * perform only a partial refill. Otherwise we could generate 2956 * refill bouncing. 2957 */ 2958 batchcount = BATCHREFILL_LIMIT; 2959 } 2960 l3 = cachep->nodelists[node]; 2961 2962 BUG_ON(ac->avail > 0 || !l3); 2963 spin_lock(&l3->list_lock); 2964 2965 /* See if we can refill from the shared array */ 2966 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) 2967 goto alloc_done; 2968 2969 while (batchcount > 0) { 2970 struct list_head *entry; 2971 struct slab *slabp; 2972 /* Get slab alloc is to come from. */ 2973 entry = l3->slabs_partial.next; 2974 if (entry == &l3->slabs_partial) { 2975 l3->free_touched = 1; 2976 entry = l3->slabs_free.next; 2977 if (entry == &l3->slabs_free) 2978 goto must_grow; 2979 } 2980 2981 slabp = list_entry(entry, struct slab, list); 2982 check_slabp(cachep, slabp); 2983 check_spinlock_acquired(cachep); 2984 while (slabp->inuse < cachep->num && batchcount--) { 2985 STATS_INC_ALLOCED(cachep); 2986 STATS_INC_ACTIVE(cachep); 2987 STATS_SET_HIGH(cachep); 2988 2989 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, 2990 node); 2991 } 2992 check_slabp(cachep, slabp); 2993 2994 /* move slabp to correct slabp list: */ 2995 list_del(&slabp->list); 2996 if (slabp->free == BUFCTL_END) 2997 list_add(&slabp->list, &l3->slabs_full); 2998 else 2999 list_add(&slabp->list, &l3->slabs_partial); 3000 } 3001 3002must_grow: 3003 l3->free_objects -= ac->avail; 3004alloc_done: 3005 spin_unlock(&l3->list_lock); 3006 3007 if (unlikely(!ac->avail)) { 3008 int x; 3009 x = cache_grow(cachep, flags, node); 3010 3011 /* cache_grow can reenable interrupts, then ac could change. */ 3012 ac = cpu_cache_get(cachep); 3013 if (!x && ac->avail == 0) /* no objects in sight? abort */ 3014 return NULL; 3015 3016 if (!ac->avail) /* objects refilled by interrupt? 
*/ 3017 goto retry; 3018 } 3019 ac->touched = 1; 3020 return ac->entry[--ac->avail]; 3021} 3022 3023static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 3024 gfp_t flags) 3025{ 3026 might_sleep_if(flags & __GFP_WAIT); 3027#if DEBUG 3028 kmem_flagcheck(cachep, flags); 3029#endif 3030} 3031 3032#if DEBUG 3033static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3034 gfp_t flags, void *objp, void *caller) 3035{ 3036 if (!objp) 3037 return objp; 3038 if (cachep->flags & SLAB_POISON) { 3039#ifdef CONFIG_DEBUG_PAGEALLOC 3040 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 3041 kernel_map_pages(virt_to_page(objp), 3042 cachep->buffer_size / PAGE_SIZE, 1); 3043 else 3044 check_poison_obj(cachep, objp); 3045#else 3046 check_poison_obj(cachep, objp); 3047#endif 3048 poison_obj(cachep, objp, POISON_INUSE); 3049 } 3050 if (cachep->flags & SLAB_STORE_USER) 3051 *dbg_userword(cachep, objp) = caller; 3052 3053 if (cachep->flags & SLAB_RED_ZONE) { 3054 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3055 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3056 slab_error(cachep, "double free, or memory outside" 3057 " object was overwritten"); 3058 printk(KERN_ERR 3059 "%p: redzone 1:0x%lx, redzone 2:0x%lx\n", 3060 objp, *dbg_redzone1(cachep, objp), 3061 *dbg_redzone2(cachep, objp)); 3062 } 3063 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3064 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3065 } 3066#ifdef CONFIG_DEBUG_SLAB_LEAK 3067 { 3068 struct slab *slabp; 3069 unsigned objnr; 3070 3071 slabp = page_get_slab(virt_to_page(objp)); 3072 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; 3073 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; 3074 } 3075#endif 3076 objp += obj_offset(cachep); 3077 if (cachep->ctor && cachep->flags & SLAB_POISON) { 3078 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; 3079 3080 if (!(flags & __GFP_WAIT)) 3081 ctor_flags |= SLAB_CTOR_ATOMIC; 3082 3083 cachep->ctor(objp, cachep, ctor_flags); 3084 } 3085#if ARCH_SLAB_MINALIGN 3086 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3087 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3088 objp, ARCH_SLAB_MINALIGN); 3089 } 3090#endif 3091 return objp; 3092} 3093#else 3094#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3095#endif 3096 3097static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3098{ 3099 void *objp; 3100 struct array_cache *ac; 3101 3102 check_irq_off(); 3103 ac = cpu_cache_get(cachep); 3104 if (likely(ac->avail)) { 3105 STATS_INC_ALLOCHIT(cachep); 3106 ac->touched = 1; 3107 objp = ac->entry[--ac->avail]; 3108 } else { 3109 STATS_INC_ALLOCMISS(cachep); 3110 objp = cache_alloc_refill(cachep, flags); 3111 } 3112 return objp; 3113} 3114 3115static __always_inline void *__cache_alloc(struct kmem_cache *cachep, 3116 gfp_t flags, void *caller) 3117{ 3118 unsigned long save_flags; 3119 void *objp = NULL; 3120 3121 cache_alloc_debugcheck_before(cachep, flags); 3122 3123 local_irq_save(save_flags); 3124 3125 if (unlikely(NUMA_BUILD && 3126 current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) 3127 objp = alternate_node_alloc(cachep, flags); 3128 3129 if (!objp) 3130 objp = ____cache_alloc(cachep, flags); 3131 /* 3132 * We may just have run out of memory on the local node. 
3133 * ____cache_alloc_node() knows how to locate memory on other nodes 3134 */ 3135 if (NUMA_BUILD && !objp) 3136 objp = ____cache_alloc_node(cachep, flags, numa_node_id()); 3137 local_irq_restore(save_flags); 3138 objp = cache_alloc_debugcheck_after(cachep, flags, objp, 3139 caller); 3140 prefetchw(objp); 3141 return objp; 3142} 3143 3144#ifdef CONFIG_NUMA 3145/* 3146 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. 3147 * 3148 * If we are in_interrupt, then process context, including cpusets and 3149 * mempolicy, may not apply and should not be used for allocation policy. 3150 */ 3151static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3152{ 3153 int nid_alloc, nid_here; 3154 3155 if (in_interrupt() || (flags & __GFP_THISNODE)) 3156 return NULL; 3157 nid_alloc = nid_here = numa_node_id(); 3158 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3159 nid_alloc = cpuset_mem_spread_node(); 3160 else if (current->mempolicy) 3161 nid_alloc = slab_node(current->mempolicy); 3162 if (nid_alloc != nid_here) 3163 return ____cache_alloc_node(cachep, flags, nid_alloc); 3164 return NULL; 3165} 3166 3167/* 3168 * Fallback function if there was no memory available and no objects on a 3169 * certain node and we are allowed to fall back. We mimick the behavior of 3170 * the page allocator. We fall back according to a zonelist determined by 3171 * the policy layer while obeying cpuset constraints. 3172 */ 3173void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3174{ 3175 struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy)) 3176 ->node_zonelists[gfp_zone(flags)]; 3177 struct zone **z; 3178 void *obj = NULL; 3179 3180 for (z = zonelist->zones; *z && !obj; z++) { 3181 int nid = zone_to_nid(*z); 3182 3183 if (zone_idx(*z) <= ZONE_NORMAL && 3184 cpuset_zone_allowed(*z, flags) && 3185 cache->nodelists[nid]) 3186 obj = ____cache_alloc_node(cache, 3187 flags | __GFP_THISNODE, nid); 3188 } 3189 return obj; 3190} 3191 3192/* 3193 * A interface to enable slab creation on nodeid 3194 */ 3195static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3196 int nodeid) 3197{ 3198 struct list_head *entry; 3199 struct slab *slabp; 3200 struct kmem_list3 *l3; 3201 void *obj; 3202 int x; 3203 3204 l3 = cachep->nodelists[nodeid]; 3205 BUG_ON(!l3); 3206 3207retry: 3208 check_irq_off(); 3209 spin_lock(&l3->list_lock); 3210 entry = l3->slabs_partial.next; 3211 if (entry == &l3->slabs_partial) { 3212 l3->free_touched = 1; 3213 entry = l3->slabs_free.next; 3214 if (entry == &l3->slabs_free) 3215 goto must_grow; 3216 } 3217 3218 slabp = list_entry(entry, struct slab, list); 3219 check_spinlock_acquired_node(cachep, nodeid); 3220 check_slabp(cachep, slabp); 3221 3222 STATS_INC_NODEALLOCS(cachep); 3223 STATS_INC_ACTIVE(cachep); 3224 STATS_SET_HIGH(cachep); 3225 3226 BUG_ON(slabp->inuse == cachep->num); 3227 3228 obj = slab_get_obj(cachep, slabp, nodeid); 3229 check_slabp(cachep, slabp); 3230 l3->free_objects--; 3231 /* move slabp to correct slabp list: */ 3232 list_del(&slabp->list); 3233 3234 if (slabp->free == BUFCTL_END) 3235 list_add(&slabp->list, &l3->slabs_full); 3236 else 3237 list_add(&slabp->list, &l3->slabs_partial); 3238 3239 spin_unlock(&l3->list_lock); 3240 goto done; 3241 3242must_grow: 3243 spin_unlock(&l3->list_lock); 3244 x = cache_grow(cachep, flags, nodeid); 3245 if (x) 3246 goto retry; 3247 3248 if (!(flags & __GFP_THISNODE)) 3249 /* Unable to grow the cache. Fall back to other nodes. 
*/ 3250 return fallback_alloc(cachep, flags); 3251 3252 return NULL; 3253 3254done: 3255 return obj; 3256} 3257#endif 3258 3259/* 3260 * Caller needs to acquire correct kmem_list's list_lock 3261 */ 3262static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 3263 int node) 3264{ 3265 int i; 3266 struct kmem_list3 *l3; 3267 3268 for (i = 0; i < nr_objects; i++) { 3269 void *objp = objpp[i]; 3270 struct slab *slabp; 3271 3272 slabp = virt_to_slab(objp); 3273 l3 = cachep->nodelists[node]; 3274 list_del(&slabp->list); 3275 check_spinlock_acquired_node(cachep, node); 3276 check_slabp(cachep, slabp); 3277 slab_put_obj(cachep, slabp, objp, node); 3278 STATS_DEC_ACTIVE(cachep); 3279 l3->free_objects++; 3280 check_slabp(cachep, slabp); 3281 3282 /* fixup slab chains */ 3283 if (slabp->inuse == 0) { 3284 if (l3->free_objects > l3->free_limit) { 3285 l3->free_objects -= cachep->num; 3286 /* No need to drop any previously held 3287 * lock here, even if we have a off-slab slab 3288 * descriptor it is guaranteed to come from 3289 * a different cache, refer to comments before 3290 * alloc_slabmgmt. 3291 */ 3292 slab_destroy(cachep, slabp); 3293 } else { 3294 list_add(&slabp->list, &l3->slabs_free); 3295 } 3296 } else { 3297 /* Unconditionally move a slab to the end of the 3298 * partial list on free - maximum time for the 3299 * other objects to be freed, too. 3300 */ 3301 list_add_tail(&slabp->list, &l3->slabs_partial); 3302 } 3303 } 3304} 3305 3306static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3307{ 3308 int batchcount; 3309 struct kmem_list3 *l3; 3310 int node = numa_node_id(); 3311 3312 batchcount = ac->batchcount; 3313#if DEBUG 3314 BUG_ON(!batchcount || batchcount > ac->avail); 3315#endif 3316 check_irq_off(); 3317 l3 = cachep->nodelists[node]; 3318 spin_lock(&l3->list_lock); 3319 if (l3->shared) { 3320 struct array_cache *shared_array = l3->shared; 3321 int max = shared_array->limit - shared_array->avail; 3322 if (max) { 3323 if (batchcount > max) 3324 batchcount = max; 3325 memcpy(&(shared_array->entry[shared_array->avail]), 3326 ac->entry, sizeof(void *) * batchcount); 3327 shared_array->avail += batchcount; 3328 goto free_done; 3329 } 3330 } 3331 3332 free_block(cachep, ac->entry, batchcount, node); 3333free_done: 3334#if STATS 3335 { 3336 int i = 0; 3337 struct list_head *p; 3338 3339 p = l3->slabs_free.next; 3340 while (p != &(l3->slabs_free)) { 3341 struct slab *slabp; 3342 3343 slabp = list_entry(p, struct slab, list); 3344 BUG_ON(slabp->inuse); 3345 3346 i++; 3347 p = p->next; 3348 } 3349 STATS_SET_FREEABLE(cachep, i); 3350 } 3351#endif 3352 spin_unlock(&l3->list_lock); 3353 ac->avail -= batchcount; 3354 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3355} 3356 3357/* 3358 * Release an obj back to its cache. If the obj has a constructed state, it must 3359 * be in this state _before_ it is released. Called with disabled ints. 
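 *
 * The fast path simply pushes the pointer onto the per-cpu array; only
 * when that array is full does cache_flusharray() hand a batch of
 * objects back to the slab lists.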
3360 */ 3361static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3362{ 3363 struct array_cache *ac = cpu_cache_get(cachep); 3364 3365 check_irq_off(); 3366 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3367 3368 if (cache_free_alien(cachep, objp)) 3369 return; 3370 3371 if (likely(ac->avail < ac->limit)) { 3372 STATS_INC_FREEHIT(cachep); 3373 ac->entry[ac->avail++] = objp; 3374 return; 3375 } else { 3376 STATS_INC_FREEMISS(cachep); 3377 cache_flusharray(cachep, ac); 3378 ac->entry[ac->avail++] = objp; 3379 } 3380} 3381 3382/** 3383 * kmem_cache_alloc - Allocate an object 3384 * @cachep: The cache to allocate from. 3385 * @flags: See kmalloc(). 3386 * 3387 * Allocate an object from this cache. The flags are only relevant 3388 * if the cache has no available objects. 3389 */ 3390void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3391{ 3392 return __cache_alloc(cachep, flags, __builtin_return_address(0)); 3393} 3394EXPORT_SYMBOL(kmem_cache_alloc); 3395 3396/** 3397 * kmem_cache_zalloc - Allocate an object. The memory is set to zero. 3398 * @cache: The cache to allocate from. 3399 * @flags: See kmalloc(). 3400 * 3401 * Allocate an object from this cache and set the allocated memory to zero. 3402 * The flags are only relevant if the cache has no available objects. 3403 */ 3404void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags) 3405{ 3406 void *ret = __cache_alloc(cache, flags, __builtin_return_address(0)); 3407 if (ret) 3408 memset(ret, 0, obj_size(cache)); 3409 return ret; 3410} 3411EXPORT_SYMBOL(kmem_cache_zalloc); 3412 3413/** 3414 * kmem_ptr_validate - check if an untrusted pointer might 3415 * be a slab entry. 3416 * @cachep: the cache we're checking against 3417 * @ptr: pointer to validate 3418 * 3419 * This verifies that the untrusted pointer looks sane: 3420 * it is _not_ a guarantee that the pointer is actually 3421 * part of the slab cache in question, but it at least 3422 * validates that the pointer can be dereferenced and 3423 * looks half-way sane. 3424 * 3425 * Currently only used for dentry validation. 3426 */ 3427int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) 3428{ 3429 unsigned long addr = (unsigned long)ptr; 3430 unsigned long min_addr = PAGE_OFFSET; 3431 unsigned long align_mask = BYTES_PER_WORD - 1; 3432 unsigned long size = cachep->buffer_size; 3433 struct page *page; 3434 3435 if (unlikely(addr < min_addr)) 3436 goto out; 3437 if (unlikely(addr > (unsigned long)high_memory - size)) 3438 goto out; 3439 if (unlikely(addr & align_mask)) 3440 goto out; 3441 if (unlikely(!kern_addr_valid(addr))) 3442 goto out; 3443 if (unlikely(!kern_addr_valid(addr + size - 1))) 3444 goto out; 3445 page = virt_to_page(ptr); 3446 if (unlikely(!PageSlab(page))) 3447 goto out; 3448 if (unlikely(page_get_cache(page) != cachep)) 3449 goto out; 3450 return 1; 3451out: 3452 return 0; 3453} 3454 3455#ifdef CONFIG_NUMA 3456/** 3457 * kmem_cache_alloc_node - Allocate an object on the specified node 3458 * @cachep: The cache to allocate from. 3459 * @flags: See kmalloc(). 3460 * @nodeid: node number of the target node. 3461 * 3462 * Identical to kmem_cache_alloc, except that this function is slow 3463 * and can sleep. And it will allocate memory on the given node, which 3464 * can improve the performance for cpu bound structures. 3465 * New and improved: it will now make sure that the object gets 3466 * put on the correct node list so that there is no false sharing. 
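 *
 * A minimal usage sketch, for illustration only ("my_cache" and
 * "target_nid" are hypothetical names, not defined in this file):
 *
 *	obj = kmem_cache_alloc_node(my_cache, GFP_KERNEL, target_nid);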
3467 */ 3468static __always_inline void * 3469__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3470 int nodeid, void *caller) 3471{ 3472 unsigned long save_flags; 3473 void *ptr; 3474 3475 cache_alloc_debugcheck_before(cachep, flags); 3476 local_irq_save(save_flags); 3477 3478 if (nodeid == -1 || nodeid == numa_node_id() || 3479 !cachep->nodelists[nodeid]) 3480 ptr = ____cache_alloc(cachep, flags); 3481 else 3482 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3483 local_irq_restore(save_flags); 3484 3485 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3486 3487 return ptr; 3488} 3489 3490void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3491{ 3492 return __cache_alloc_node(cachep, flags, nodeid, 3493 __builtin_return_address(0)); 3494} 3495EXPORT_SYMBOL(kmem_cache_alloc_node); 3496 3497static __always_inline void * 3498__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3499{ 3500 struct kmem_cache *cachep; 3501 3502 cachep = kmem_find_general_cachep(size, flags); 3503 if (unlikely(cachep == NULL)) 3504 return NULL; 3505 return kmem_cache_alloc_node(cachep, flags, node); 3506} 3507 3508#ifdef CONFIG_DEBUG_SLAB 3509void *__kmalloc_node(size_t size, gfp_t flags, int node) 3510{ 3511 return __do_kmalloc_node(size, flags, node, 3512 __builtin_return_address(0)); 3513} 3514EXPORT_SYMBOL(__kmalloc_node); 3515 3516void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3517 int node, void *caller) 3518{ 3519 return __do_kmalloc_node(size, flags, node, caller); 3520} 3521EXPORT_SYMBOL(__kmalloc_node_track_caller); 3522#else 3523void *__kmalloc_node(size_t size, gfp_t flags, int node) 3524{ 3525 return __do_kmalloc_node(size, flags, node, NULL); 3526} 3527EXPORT_SYMBOL(__kmalloc_node); 3528#endif /* CONFIG_DEBUG_SLAB */ 3529#endif /* CONFIG_NUMA */ 3530 3531/** 3532 * __do_kmalloc - allocate memory 3533 * @size: how many bytes of memory are required. 3534 * @flags: the type of memory to allocate (see kmalloc). 3535 * @caller: function caller for debug tracking of the caller 3536 */ 3537static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3538 void *caller) 3539{ 3540 struct kmem_cache *cachep; 3541 3542 /* If you want to save a few bytes .text space: replace 3543 * __ with kmem_. 3544 * Then kmalloc uses the uninlined functions instead of the inline 3545 * functions. 3546 */ 3547 cachep = __find_general_cachep(size, flags); 3548 if (unlikely(cachep == NULL)) 3549 return NULL; 3550 return __cache_alloc(cachep, flags, caller); 3551} 3552 3553 3554#ifdef CONFIG_DEBUG_SLAB 3555void *__kmalloc(size_t size, gfp_t flags) 3556{ 3557 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3558} 3559EXPORT_SYMBOL(__kmalloc); 3560 3561void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) 3562{ 3563 return __do_kmalloc(size, flags, caller); 3564} 3565EXPORT_SYMBOL(__kmalloc_track_caller); 3566 3567#else 3568void *__kmalloc(size_t size, gfp_t flags) 3569{ 3570 return __do_kmalloc(size, flags, NULL); 3571} 3572EXPORT_SYMBOL(__kmalloc); 3573#endif 3574 3575/** 3576 * kmem_cache_free - Deallocate an object 3577 * @cachep: The cache the allocation was from. 3578 * @objp: The previously allocated object. 3579 * 3580 * Free an object which was previously allocated from this 3581 * cache. 
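 *
 * A minimal alloc/free pairing, for illustration only ("my_cache" and
 * "obj" are hypothetical names, not defined in this file):
 *
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	... use obj ...
 *	kmem_cache_free(my_cache, obj);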
3582 */ 3583void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3584{ 3585 unsigned long flags; 3586 3587 BUG_ON(virt_to_cache(objp) != cachep); 3588 3589 local_irq_save(flags); 3590 __cache_free(cachep, objp); 3591 local_irq_restore(flags); 3592} 3593EXPORT_SYMBOL(kmem_cache_free); 3594 3595/** 3596 * kfree - free previously allocated memory 3597 * @objp: pointer returned by kmalloc. 3598 * 3599 * If @objp is NULL, no operation is performed. 3600 * 3601 * Don't free memory not originally allocated by kmalloc() 3602 * or you will run into trouble. 3603 */ 3604void kfree(const void *objp) 3605{ 3606 struct kmem_cache *c; 3607 unsigned long flags; 3608 3609 if (unlikely(!objp)) 3610 return; 3611 local_irq_save(flags); 3612 kfree_debugcheck(objp); 3613 c = virt_to_cache(objp); 3614 debug_check_no_locks_freed(objp, obj_size(c)); 3615 __cache_free(c, (void *)objp); 3616 local_irq_restore(flags); 3617} 3618EXPORT_SYMBOL(kfree); 3619 3620unsigned int kmem_cache_size(struct kmem_cache *cachep) 3621{ 3622 return obj_size(cachep); 3623} 3624EXPORT_SYMBOL(kmem_cache_size); 3625 3626const char *kmem_cache_name(struct kmem_cache *cachep) 3627{ 3628 return cachep->name; 3629} 3630EXPORT_SYMBOL_GPL(kmem_cache_name); 3631 3632/* 3633 * This initializes kmem_list3 or resizes varioius caches for all nodes. 3634 */ 3635static int alloc_kmemlist(struct kmem_cache *cachep) 3636{ 3637 int node; 3638 struct kmem_list3 *l3; 3639 struct array_cache *new_shared; 3640 struct array_cache **new_alien = NULL; 3641 3642 for_each_online_node(node) { 3643 3644 if (use_alien_caches) { 3645 new_alien = alloc_alien_cache(node, cachep->limit); 3646 if (!new_alien) 3647 goto fail; 3648 } 3649 3650 new_shared = alloc_arraycache(node, 3651 cachep->shared*cachep->batchcount, 3652 0xbaadf00d); 3653 if (!new_shared) { 3654 free_alien_cache(new_alien); 3655 goto fail; 3656 } 3657 3658 l3 = cachep->nodelists[node]; 3659 if (l3) { 3660 struct array_cache *shared = l3->shared; 3661 3662 spin_lock_irq(&l3->list_lock); 3663 3664 if (shared) 3665 free_block(cachep, shared->entry, 3666 shared->avail, node); 3667 3668 l3->shared = new_shared; 3669 if (!l3->alien) { 3670 l3->alien = new_alien; 3671 new_alien = NULL; 3672 } 3673 l3->free_limit = (1 + nr_cpus_node(node)) * 3674 cachep->batchcount + cachep->num; 3675 spin_unlock_irq(&l3->list_lock); 3676 kfree(shared); 3677 free_alien_cache(new_alien); 3678 continue; 3679 } 3680 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); 3681 if (!l3) { 3682 free_alien_cache(new_alien); 3683 kfree(new_shared); 3684 goto fail; 3685 } 3686 3687 kmem_list3_init(l3); 3688 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3689 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3690 l3->shared = new_shared; 3691 l3->alien = new_alien; 3692 l3->free_limit = (1 + nr_cpus_node(node)) * 3693 cachep->batchcount + cachep->num; 3694 cachep->nodelists[node] = l3; 3695 } 3696 return 0; 3697 3698fail: 3699 if (!cachep->next.next) { 3700 /* Cache is not active yet. 
Roll back what we did */ 3701 node--; 3702 while (node >= 0) { 3703 if (cachep->nodelists[node]) { 3704 l3 = cachep->nodelists[node]; 3705 3706 kfree(l3->shared); 3707 free_alien_cache(l3->alien); 3708 kfree(l3); 3709 cachep->nodelists[node] = NULL; 3710 } 3711 node--; 3712 } 3713 } 3714 return -ENOMEM; 3715} 3716 3717struct ccupdate_struct { 3718 struct kmem_cache *cachep; 3719 struct array_cache *new[NR_CPUS]; 3720}; 3721 3722static void do_ccupdate_local(void *info) 3723{ 3724 struct ccupdate_struct *new = info; 3725 struct array_cache *old; 3726 3727 check_irq_off(); 3728 old = cpu_cache_get(new->cachep); 3729 3730 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3731 new->new[smp_processor_id()] = old; 3732} 3733 3734/* Always called with the cache_chain_mutex held */ 3735static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3736 int batchcount, int shared) 3737{ 3738 struct ccupdate_struct *new; 3739 int i; 3740 3741 new = kzalloc(sizeof(*new), GFP_KERNEL); 3742 if (!new) 3743 return -ENOMEM; 3744 3745 for_each_online_cpu(i) { 3746 new->new[i] = alloc_arraycache(cpu_to_node(i), limit, 3747 batchcount); 3748 if (!new->new[i]) { 3749 for (i--; i >= 0; i--) 3750 kfree(new->new[i]); 3751 kfree(new); 3752 return -ENOMEM; 3753 } 3754 } 3755 new->cachep = cachep; 3756 3757 on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); 3758 3759 check_irq_on(); 3760 cachep->batchcount = batchcount; 3761 cachep->limit = limit; 3762 cachep->shared = shared; 3763 3764 for_each_online_cpu(i) { 3765 struct array_cache *ccold = new->new[i]; 3766 if (!ccold) 3767 continue; 3768 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3769 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 3770 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3771 kfree(ccold); 3772 } 3773 kfree(new); 3774 return alloc_kmemlist(cachep); 3775} 3776 3777/* Called with cache_chain_mutex held always */ 3778static int enable_cpucache(struct kmem_cache *cachep) 3779{ 3780 int err; 3781 int limit, shared; 3782 3783 /* 3784 * The head array serves three purposes: 3785 * - create a LIFO ordering, i.e. return objects that are cache-warm 3786 * - reduce the number of spinlock operations. 3787 * - reduce the number of linked list operations on the slab and 3788 * bufctl chains: array operations are cheaper. 3789 * The numbers are guessed, we should auto-tune as described by 3790 * Bonwick. 3791 */ 3792 if (cachep->buffer_size > 131072) 3793 limit = 1; 3794 else if (cachep->buffer_size > PAGE_SIZE) 3795 limit = 8; 3796 else if (cachep->buffer_size > 1024) 3797 limit = 24; 3798 else if (cachep->buffer_size > 256) 3799 limit = 54; 3800 else 3801 limit = 120; 3802 3803 /* 3804 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3805 * allocation behaviour: Most allocs on one cpu, most free operations 3806 * on another cpu. For these cases, an efficient object passing between 3807 * cpus is necessary. This is provided by a shared array. The array 3808 * replaces Bonwick's magazine layer. 3809 * On uniprocessor, it's functionally equivalent (but less efficient) 3810 * to a larger limit. Thus disabled by default. 3811 */ 3812 shared = 0; 3813#ifdef CONFIG_SMP 3814 if (cachep->buffer_size <= PAGE_SIZE) 3815 shared = 8; 3816#endif 3817 3818#if DEBUG 3819 /* 3820 * With debugging enabled, large batchcount lead to excessively long 3821 * periods with disabled local interrupts. 
Limit the batchcount 3822 */ 3823 if (limit > 32) 3824 limit = 32; 3825#endif 3826 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared); 3827 if (err) 3828 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 3829 cachep->name, -err); 3830 return err; 3831} 3832 3833/* 3834 * Drain an array if it contains any elements taking the l3 lock only if 3835 * necessary. Note that the l3 listlock also protects the array_cache 3836 * if drain_array() is used on the shared array. 3837 */ 3838void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 3839 struct array_cache *ac, int force, int node) 3840{ 3841 int tofree; 3842 3843 if (!ac || !ac->avail) 3844 return; 3845 if (ac->touched && !force) { 3846 ac->touched = 0; 3847 } else { 3848 spin_lock_irq(&l3->list_lock); 3849 if (ac->avail) { 3850 tofree = force ? ac->avail : (ac->limit + 4) / 5; 3851 if (tofree > ac->avail) 3852 tofree = (ac->avail + 1) / 2; 3853 free_block(cachep, ac->entry, tofree, node); 3854 ac->avail -= tofree; 3855 memmove(ac->entry, &(ac->entry[tofree]), 3856 sizeof(void *) * ac->avail); 3857 } 3858 spin_unlock_irq(&l3->list_lock); 3859 } 3860} 3861 3862/** 3863 * cache_reap - Reclaim memory from caches. 3864 * @unused: unused parameter 3865 * 3866 * Called from workqueue/eventd every few seconds. 3867 * Purpose: 3868 * - clear the per-cpu caches for this CPU. 3869 * - return freeable pages to the main free memory pool. 3870 * 3871 * If we cannot acquire the cache chain mutex then just give up - we'll try 3872 * again on the next iteration. 3873 */ 3874static void cache_reap(struct work_struct *unused) 3875{ 3876 struct kmem_cache *searchp; 3877 struct kmem_list3 *l3; 3878 int node = numa_node_id(); 3879 3880 if (!mutex_trylock(&cache_chain_mutex)) { 3881 /* Give up. Setup the next iteration. */ 3882 schedule_delayed_work(&__get_cpu_var(reap_work), 3883 REAPTIMEOUT_CPUC); 3884 return; 3885 } 3886 3887 list_for_each_entry(searchp, &cache_chain, next) { 3888 check_irq_on(); 3889 3890 /* 3891 * We only take the l3 lock if absolutely necessary and we 3892 * have established with reasonable certainty that 3893 * we can do some work if the lock was obtained. 3894 */ 3895 l3 = searchp->nodelists[node]; 3896 3897 reap_alien(searchp, l3); 3898 3899 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); 3900 3901 /* 3902 * These are racy checks but it does not matter 3903 * if we skip one check or scan twice. 3904 */ 3905 if (time_after(l3->next_reap, jiffies)) 3906 goto next; 3907 3908 l3->next_reap = jiffies + REAPTIMEOUT_LIST3; 3909 3910 drain_array(searchp, l3, l3->shared, 0, node); 3911 3912 if (l3->free_touched) 3913 l3->free_touched = 0; 3914 else { 3915 int freed; 3916 3917 freed = drain_freelist(searchp, l3, (l3->free_limit + 3918 5 * searchp->num - 1) / (5 * searchp->num)); 3919 STATS_ADD_REAPED(searchp, freed); 3920 } 3921next: 3922 cond_resched(); 3923 } 3924 check_irq_on(); 3925 mutex_unlock(&cache_chain_mutex); 3926 next_reap_node(); 3927 refresh_cpu_vm_stats(smp_processor_id()); 3928 /* Set up the next iteration */ 3929 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); 3930} 3931 3932#ifdef CONFIG_PROC_FS 3933 3934static void print_slabinfo_header(struct seq_file *m) 3935{ 3936 /* 3937 * Output format version, so at least we can change it 3938 * without _too_ many complaints. 
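 * The columns listed here describe the per-cache values printed by
 * s_show() below; the globalstat and cpustat columns are emitted only
 * when STATS is enabled.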

#ifdef CONFIG_PROC_FS

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#if STATS
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}
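
/*
 * For illustration only (the values are made up, not from a real system),
 * a line emitted by s_show() below for a 132-byte cache that packs 29
 * objects into each single-page slab might read:
 *
 *   dentry_cache       20544  20677    132   29    1 : tunables  120   60    8 : slabdata    713    713      0
 *
 * i.e. 713 slabs, all of them busy, with 20544 of the 713 * 29 = 20677
 * object slots currently allocated, tuned to the limit/batchcount/shared
 * of 120/60/8 that enable_cpucache() picks for small objects on SMP.
 */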

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct list_head *p;

	mutex_lock(&cache_chain_mutex);
	if (!n)
		print_slabinfo_header(m);
	p = cache_chain.next;
	while (n--) {
		p = p->next;
		if (p == &cache_chain)
			return NULL;
	}
	return list_entry(p, struct kmem_cache, next);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct kmem_cache *cachep = p;
	++*pos;
	return cachep->next.next == &cache_chain ?
		NULL : list_entry(cachep->next.next, struct kmem_cache, next);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&cache_chain_mutex);
}

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = p;
	struct slab *slabp;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_list3 *l3;

	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		check_irq_on();
		spin_lock_irq(&l3->list_lock);

		list_for_each_entry(slabp, &l3->slabs_full, list) {
			if (slabp->inuse != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(slabp, &l3->slabs_partial, list) {
			if (slabp->inuse == cachep->num && !error)
				error = "slabs_partial inuse accounting error";
			if (!slabp->inuse && !error)
				error = "slabs_partial/inuse accounting error";
			active_objs += slabp->inuse;
			active_slabs++;
		}
		list_for_each_entry(slabp, &l3->slabs_free, list) {
			if (slabp->inuse && !error)
				error = "slabs_free/inuse accounting error";
			num_slabs++;
		}
		free_objects += l3->free_objects;
		if (l3->shared)
			shared_avail += l3->shared->avail;

		spin_unlock_irq(&l3->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   name, active_objs, num_objs, cachep->buffer_size,
		   cachep->num, (1 << cachep->gfporder));
	seq_printf(m, " : tunables %4u %4u %4u",
		   cachep->limit, cachep->batchcount, cachep->shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   active_slabs, num_slabs, shared_avail);
#if STATS
	{	/* list3 stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu %4lu",
			   allocs, high, grown, reaped, errors, max_freeable,
			   node_allocs, node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 *    cache-name
 *    num-active-objs
 *    total-objs
 *    object-size
 *    objs-per-slab
 *    pages-per-slab
 * followed by the tunables and slabdata columns and, with statistics
 * enabled, the globalstat and cpustat columns.
 */

struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user * buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;
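
	/*
	 * For example (the name and values are purely illustrative), writing
	 * the single line
	 *
	 *	dentry_cache 256 128 8
	 *
	 * to /proc/slabinfo requests limit=256, batchcount=128 and shared=8
	 * for the dentry cache; the request is applied by do_tune_cpucache()
	 * below once the basic sanity checks pass.
	 */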

	/* Find the cache in the chain of caches. */
	mutex_lock(&cache_chain_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &cache_chain, next) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
			    batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared);
			}
			break;
		}
	}
	mutex_unlock(&cache_chain_mutex);
	if (res >= 0)
		res = count;
	return res;
}

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct list_head *p;

	mutex_lock(&cache_chain_mutex);
	p = cache_chain.next;
	while (n--) {
		p = p->next;
		if (p == &cache_chain)
			return NULL;
	}
	return list_entry(p, struct kmem_cache, next);
}

static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p,
		n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
{
	void *p;
	int i;
	if (n[0] == n[1])
		return;
	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
			continue;
		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char *modname;
	const char *name;
	unsigned long offset, size;
	char namebuf[KSYM_NAME_LEN + 1];

	name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);

	if (name) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname)
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = p;
	struct slab *slabp;
	struct kmem_list3 *l3;
	const char *name;
	unsigned long *n = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	n[1] = 0;

	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		check_irq_on();
		spin_lock_irq(&l3->list_lock);

		list_for_each_entry(slabp, &l3->slabs_full, list)
			handle_slab(n, cachep, slabp);
		list_for_each_entry(slabp, &l3->slabs_partial, list)
			handle_slab(n, cachep, slabp);
		spin_unlock_irq(&l3->list_lock);
	}
	name = cachep->name;
	if (n[0] == n[1]) {
		/* Increase the buffer size */
		mutex_unlock(&cache_chain_mutex);
		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long),
				     GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = n;
			mutex_lock(&cache_chain_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = n[0] * 2;
		kfree(n);
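
		/*
		 * Descriptive note on the scratch buffer layout: n[0] holds
		 * the capacity in (caller address, count) pairs, n[1] the
		 * number of pairs add_caller() has recorded so far, and the
		 * pairs themselves start at n[2], kept sorted by address.
		 * Running out of room above means the freshly kzalloc()ed
		 * buffer with twice the capacity replaces the old one;
		 * forcing m->count to m->size below makes the seq_file core
		 * treat the record as overflowed and retry leaks_show() for
		 * this cache with the enlarged buffer.
		 */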
		mutex_lock(&cache_chain_mutex);
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < n[1]; i++) {
		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
		show_symbol(m, n[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

struct seq_operations slabstats_op = {
	.start = leaks_start,
	.next = s_next,
	.stop = s_stop,
	.show = leaks_show,
};
#endif
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
unsigned int ksize(const void *objp)
{
	if (unlikely(objp == NULL))
		return 0;

	return obj_size(virt_to_cache(objp));
}
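
/*
 * Usage sketch for ksize() (illustrative only, not code that belongs in
 * this file): a caller that wants to use the slack left by rounding a
 * request up to the next kmalloc cache might do
 *
 *	char *buf = kmalloc(17, GFP_KERNEL);
 *	unsigned int usable;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	usable = ksize(buf);	(at least 17; with the default kmalloc
 *				 caches typically the 32-byte class)
 *
 * and may then treat all 'usable' bytes as its own until kfree(buf).
 */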