slab.c revision d5cff635290aec9ad7e6ee546aa4fae895361cbb
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in:
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in:
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a
 * new cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
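 *
 * Illustrative usage sketch (not part of the original file; "foo",
 * "foo_cache" and foo_ctor() are hypothetical names):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		struct foo *f = obj;
 *		spin_lock_init(&f->lock);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *
 * The constructor runs only when a new slab is populated, so f must be
 * freed with the constructor-established initializations (here, an
 * initialized f->lock) restored, per the rule above.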
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/kmemtrace.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking free objects within a slab, as linked
 * offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32-bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};
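
/*
 * Illustrative sketch of how the free-object chain is encoded (the real
 * helpers live further down in this file; the locals below are
 * hypothetical): the kmem_bufctl_t array immediately follows struct slab,
 * and slabp->free indexes the first free object.
 *
 *	kmem_bufctl_t *bufctl = (kmem_bufctl_t *)(slabp + 1);
 *	kmem_bufctl_t first_free = slabp->free;
 *	kmem_bufctl_t second_free = bufctl[first_free];
 *
 * Allocating an object pops slabp->free and advances it to
 * bufctl[slabp->free]; BUFCTL_END terminates the chain.
 */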

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};
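
/*
 * Illustrative sketch of the LIFO fast paths (the real versions are
 * ____cache_alloc() and __cache_free() later in this file; 'ac' and
 * 'objp' are hypothetical locals): allocation pops the most recently
 * freed, cache-warm object, and freeing pushes it back.
 *
 *	if (ac->avail)				// alloc fast path
 *		objp = ac->entry[--ac->avail];
 *	...
 *	if (ac->avail < ac->limit)		// free fast path
 *		ac->entry[ac->avail++] = objp;
 */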

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_L3 (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(struct work_struct *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <= x) \
		return i; \
	else \
		i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields, the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps mean a lower probability of unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *		the end of an object is aligned with the end of the real
 *		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long *) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->buffer_size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->buffer_size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif
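
/*
 * Worked example of the layout above (illustrative, 64-bit arch, a cache
 * created with SLAB_RED_ZONE | SLAB_STORE_USER): obj_offset includes one
 * redzone word, so dbg_redzone1() lands at objp + obj_offset - 8 and the
 * real object starts at objp + obj_offset; dbg_redzone2() sits at
 * objp + buffer_size - 8 - REDZONE_ALIGN, leaving room for the
 * last-caller word that dbg_userword() reads from the final
 * BYTES_PER_WORD of the allocation.
 */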

#ifdef CONFIG_KMEMTRACE
size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return cachep->buffer_size;
}
EXPORT_SYMBOL(slab_buffer_size);
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Functions for storing/retrieving the cachep and/or slab from the page
 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 * these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	page = compound_head(page);
	BUG_ON(!PageSlab(page));
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	BUG_ON(!PageSlab(page));
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page_get_slab(page);
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->buffer_size)
 *   Using the fact that buffer_size is a constant for a particular cache,
 *   we can replace (offset / cache->buffer_size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
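
/*
 * Illustrative round trip (hypothetical locals): index_to_obj() and
 * obj_to_index() are inverses, with the divide replaced by a reciprocal
 * multiply:
 *
 *	unsigned int idx = obj_to_index(cache, slabp, objp);
 *	BUG_ON(index_to_obj(cache, slabp, idx) != objp);
 *
 * reciprocal_divide(offset, cache->reciprocal_buffer_size) equals
 * offset / cache->buffer_size for the offsets that occur within a slab.
 */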

/*
 * These are the default caches for kmalloc. Custom caches can have other
 * sizes.
 */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
	{NULL,}
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

#ifdef CONFIG_LOCKDEP

/*
 * Slab sometimes uses the kmalloc slabs to store the slab headers
 * for other slabs "off slab".
 * The locking for this is tricky in that it nests within the locks
 * of all other slabs in a few places; to deal with this special
 * locking we put on-slab caches into a separate lock-class.
 *
 * We set lock class for alien array caches which are up during init.
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug.
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static inline void init_lock_keys(void)
{
	int q;
	struct cache_sizes *s = malloc_sizes;

	while (s->cs_size != ULONG_MAX) {
		for_each_node(q) {
			struct array_cache **alc;
			int r;
			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
			if (!l3 || OFF_SLAB(s->cs_cachep))
				continue;
			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
			alc = l3->alien;
			/*
			 * FIXME: This check for BAD_ALIEN_MAGIC
			 * should go away when common slab code is taught to
			 * work even without alien caches.
			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
			 * for alloc_alien_cache,
			 */
			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
				continue;
			for_each_node(r) {
				if (alc[r])
					lockdep_set_class(&alc[r]->lock,
					     &on_slab_alc_key);
			}
		}
		s++;
	}
}
#else
static inline void init_lock_keys(void)
{
}
#endif

/*
 * Guard access to the cache-chain.
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

/*
 * used by boot code to determine if it can use slab based allocator
 */
int slab_is_available(void)
{
	return g_cpucache_up == FULL;
}

static DEFINE_PER_CPU(struct delayed_work, reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	if (!size)
		return ZERO_SIZE_PTR;

	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
#endif
	return csizep->cs_cachep;
}

static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
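
/*
 * Example (assuming the default kmalloc_sizes.h table): a request for
 * 100 bytes walks malloc_sizes[] until cs_size >= 100 and returns the
 * "size-128" cache; with GFP_DMA it would return "size-128(DMA)":
 *
 *	struct kmem_cache *c = kmem_find_general_cachep(100, GFP_KERNEL);
 */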

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
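
/*
 * Worked example (illustrative: 4K pages, 32-bit, sizeof(struct slab)
 * taken as 28, align == 4): for gfporder 0 and 256-byte on-slab objects
 * the initial guess is (4096 - 28) / (256 + 4) = 15; the correction
 * check finds slab_mgmt_size(15, 4) + 15*256 = 88 + 3840 <= 4096, so
 * *num = 15 and *left_over = 4096 - 3840 - 88 = 168 bytes of colouring
 * space.
 */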

#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */

static int use_alien_caches __read_mostly = 1;
static int numa_platform __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to
 * 2Hz via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(nc);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}
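
/*
 * Example (hypothetical values): with from->avail == 10, max == 8 and
 * to->limit - to->avail == 5, nr == 5; the 5 most recently freed entries
 * move from the tail of 'from' to the tail of 'to', keeping LIFO order
 * intact.
 */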

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_list3 *l3;
	struct array_cache *alien = NULL;
	int node;

	node = numa_node_id();

	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(slabp->nodeid == node))
		return 0;

	l3 = cachep->nodelists[node];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		alien->entry[alien->avail++] = objp;
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}
#endif

static void __cpuinit cpuup_canceled(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	const struct cpumask *mask = cpumask_of_node(node);

	list_for_each_entry(cachep, &cache_chain, next) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;

		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
		l3 = cachep->nodelists[node];

		if (!l3)
			goto free_array_cache;

		spin_lock_irq(&l3->list_lock);

		/* Free limit for this kmem_list3 */
		l3->free_limit -= cachep->batchcount;
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpus_empty(*mask)) {
			spin_unlock_irq(&l3->list_lock);
			goto free_array_cache;
		}

		shared = l3->shared;
		if (shared) {
			free_block(cachep, shared->entry,
				   shared->avail, node);
			l3->shared = NULL;
		}

		alien = l3->alien;
		l3->alien = NULL;

		spin_unlock_irq(&l3->list_lock);

		kfree(shared);
		if (alien) {
			drain_alien_cache(cachep, alien);
			free_alien_cache(alien);
		}
free_array_cache:
		kfree(nc);
	}
	/*
	 * In the previous loop, all the objects were freed to
	 * the respective cache's slabs, now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
	list_for_each_entry(cachep, &cache_chain, next) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;
		drain_freelist(cachep, l3, l3->free_objects);
	}
}

static int __cpuinit cpuup_prepare(long cpu)
{
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	const int memsize = sizeof(struct kmem_list3);

	/*
	 * We need to do this right in the beginning since
	 * alloc_arraycache's are going to use this list.
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_list3 and not this cpu's kmem_list3
	 */

	list_for_each_entry(cachep, &cache_chain, next) {
		/*
		 * Set up the size64 kmemlist for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated this
		 */
		if (!cachep->nodelists[node]) {
			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!l3)
				goto bad;
			kmem_list3_init(l3);
			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

			/*
			 * The l3s don't come and go as CPUs come and
			 * go.  cache_chain_mutex is sufficient
			 * protection here.
			 */
			cachep->nodelists[node] = l3;
		}

		spin_lock_irq(&cachep->nodelists[node]->list_lock);
		cachep->nodelists[node]->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
	}

	/*
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
	list_for_each_entry(cachep, &cache_chain, next) {
		struct array_cache *nc;
		struct array_cache *shared = NULL;
		struct array_cache **alien = NULL;

		nc = alloc_arraycache(node, cachep->limit,
					cachep->batchcount);
		if (!nc)
			goto bad;
		if (cachep->shared) {
			shared = alloc_arraycache(node,
				cachep->shared * cachep->batchcount,
				0xbaadf00d);
			if (!shared) {
				kfree(nc);
				goto bad;
			}
		}
		if (use_alien_caches) {
			alien = alloc_alien_cache(node, cachep->limit);
			if (!alien) {
				kfree(shared);
				kfree(nc);
				goto bad;
			}
		}
		cachep->array[cpu] = nc;
		l3 = cachep->nodelists[node];
		BUG_ON(!l3);

		spin_lock_irq(&l3->list_lock);
		if (!l3->shared) {
			/*
			 * We are serialised from CPU_DEAD or
			 * CPU_UP_CANCELLED by the cpucontrol lock
			 */
			l3->shared = shared;
			shared = NULL;
		}
#ifdef CONFIG_NUMA
		if (!l3->alien) {
			l3->alien = alien;
			alien = NULL;
		}
#endif
		spin_unlock_irq(&l3->list_lock);
		kfree(shared);
		free_alien_cache(alien);
	}
	return 0;
bad:
	cpuup_canceled(cpu);
	return -ENOMEM;
}

static int __cpuinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&cache_chain_mutex);
		err = cpuup_prepare(cpu);
		mutex_unlock(&cache_chain_mutex);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the cache_chain_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		 */
		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(reap_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_list3 of any cache. This is to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The list3
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall through */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		mutex_lock(&cache_chain_mutex);
		cpuup_canceled(cpu);
		mutex_unlock(&cache_chain_mutex);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

/*
 * swap the static kmem_list3 with kmalloced memory
 */
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
			int nodeid)
{
	struct kmem_list3 *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
	BUG_ON(!ptr);

	local_irq_disable();
	memcpy(ptr, list, sizeof(struct kmem_list3));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->nodelists[nodeid] = ptr;
	local_irq_enable();
}

/*
 * For setting up all the kmem_list3s for cache whose buffer_size is same as
 * size of kmem_list3.
 */
static void __init set_up_list3s(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->nodelists[node] = &initkmem_list3[index + node];
		cachep->nodelists[node]->next_reap = jiffies +
		    REAPTIMEOUT_LIST3 +
		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
	}
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;
	int order;
	int node;

	if (num_possible_nodes() == 1) {
		use_alien_caches = 0;
		numa_platform = 0;
	}

	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;
	}
	set_up_list3s(&cache_cache, CACHE_CACHE);

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 */
	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the cache_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except cache_cache itself:
	 *    cache_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for cache_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_list3 for cache_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final
	 *    sizes.
	 */

	node = numa_node_id();

	/* 1) create the cache_cache */
	INIT_LIST_HEAD(&cache_chain);
	list_add(&cache_cache.next, &cache_chain);
	cache_cache.colour_off = cache_line_size();
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];

	/*
	 * struct kmem_cache size depends on nr_node_ids, which
	 * can be less than MAX_NUMNODES.
	 */
	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
				 nr_node_ids * sizeof(struct kmem_list3 *);
#if DEBUG
	cache_cache.obj_size = cache_cache.buffer_size;
#endif
	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
					cache_line_size());
	cache_cache.reciprocal_buffer_size =
		reciprocal_value(cache_cache.buffer_size);

	for (order = 0; order < MAX_ORDER; order++) {
		cache_estimate(order, cache_cache.buffer_size,
			cache_line_size(), 0, &left_over, &cache_cache.num);
		if (cache_cache.num)
			break;
	}
	BUG_ON(!cache_cache.num);
	cache_cache.gfporder = order;
	cache_cache.colour = left_over / cache_cache.colour_off;
	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
				      sizeof(struct slab), cache_line_size());

	/* 2+3) create the kmalloc caches */
	sizes = malloc_sizes;
	names = cache_names;

	/*
	 * Initialize the caches that provide memory for the array cache and
	 * the kmem_list3 structures first.  Without this, further allocations
	 * will bug.
	 */

	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
					sizes[INDEX_AC].cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL);

	if (INDEX_AC != INDEX_L3) {
		sizes[INDEX_L3].cs_cachep =
			kmem_cache_create(names[INDEX_L3].name,
				sizes[INDEX_L3].cs_size,
				ARCH_KMALLOC_MINALIGN,
				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
				NULL);
	}

	slab_early_init = 0;

	while (sizes->cs_size != ULONG_MAX) {
		/*
		 * For performance, all the general caches are L1 aligned.
		 * This should be particularly beneficial on SMP boxes, as it
		 * eliminates "false sharing".
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches.
		 */
1563 */ 1564 if (!sizes->cs_cachep) { 1565 sizes->cs_cachep = kmem_cache_create(names->name, 1566 sizes->cs_size, 1567 ARCH_KMALLOC_MINALIGN, 1568 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1569 NULL); 1570 } 1571#ifdef CONFIG_ZONE_DMA 1572 sizes->cs_dmacachep = kmem_cache_create( 1573 names->name_dma, 1574 sizes->cs_size, 1575 ARCH_KMALLOC_MINALIGN, 1576 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1577 SLAB_PANIC, 1578 NULL); 1579#endif 1580 sizes++; 1581 names++; 1582 } 1583 /* 4) Replace the bootstrap head arrays */ 1584 { 1585 struct array_cache *ptr; 1586 1587 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1588 1589 local_irq_disable(); 1590 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); 1591 memcpy(ptr, cpu_cache_get(&cache_cache), 1592 sizeof(struct arraycache_init)); 1593 /* 1594 * Do not assume that spinlocks can be initialized via memcpy: 1595 */ 1596 spin_lock_init(&ptr->lock); 1597 1598 cache_cache.array[smp_processor_id()] = ptr; 1599 local_irq_enable(); 1600 1601 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1602 1603 local_irq_disable(); 1604 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) 1605 != &initarray_generic.cache); 1606 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), 1607 sizeof(struct arraycache_init)); 1608 /* 1609 * Do not assume that spinlocks can be initialized via memcpy: 1610 */ 1611 spin_lock_init(&ptr->lock); 1612 1613 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1614 ptr; 1615 local_irq_enable(); 1616 } 1617 /* 5) Replace the bootstrap kmem_list3's */ 1618 { 1619 int nid; 1620 1621 for_each_online_node(nid) { 1622 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid); 1623 1624 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1625 &initkmem_list3[SIZE_AC + nid], nid); 1626 1627 if (INDEX_AC != INDEX_L3) { 1628 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1629 &initkmem_list3[SIZE_L3 + nid], nid); 1630 } 1631 } 1632 } 1633 1634 /* 6) resize the head arrays to their final sizes */ 1635 { 1636 struct kmem_cache *cachep; 1637 mutex_lock(&cache_chain_mutex); 1638 list_for_each_entry(cachep, &cache_chain, next) 1639 if (enable_cpucache(cachep)) 1640 BUG(); 1641 mutex_unlock(&cache_chain_mutex); 1642 } 1643 1644 /* Annotate slab for lockdep -- annotate the malloc caches */ 1645 init_lock_keys(); 1646 1647 1648 /* Done! */ 1649 g_cpucache_up = FULL; 1650 1651 /* 1652 * Register a cpu startup notifier callback that initializes 1653 * cpu_cache_get for all new cpus 1654 */ 1655 register_cpu_notifier(&cpucache_notifier); 1656 1657 /* 1658 * The reap timers are started later, with a module init call: That part 1659 * of the kernel is not yet operational. 1660 */ 1661} 1662 1663static int __init cpucache_init(void) 1664{ 1665 int cpu; 1666 1667 /* 1668 * Register the timers that return unneeded pages to the page allocator 1669 */ 1670 for_each_online_cpu(cpu) 1671 start_cpu_timer(cpu); 1672 return 0; 1673} 1674__initcall(cpucache_init); 1675 1676/* 1677 * Interface to system's page allocator. No need to hold the cache-lock. 1678 * 1679 * If we requested dmaable memory, we will get it. Even if we 1680 * did not request dmaable memory, we might get it, but that 1681 * would be relatively rare and ignorable. 
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	struct page *page;
	int nr_pages;
	int i;

#ifndef CONFIG_MMU
	/*
	 * Nommu uses slabs for process anonymous memory allocations, and thus
	 * requires __GFP_COMP to properly refcount higher order allocations
	 */
	flags |= __GFP_COMP;
#endif

	flags |= cachep->gfpflags;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page)
		return NULL;

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
	for (i = 0; i < nr_pages; i++)
		__SetPageSlab(page + i);
	return page_address(page);
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, void *addr)
{
	unsigned long i = (1 << cachep->gfporder);
	struct page *page = virt_to_page(addr);
	const unsigned long nr_freed = i;

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);
	while (i--) {
		BUG_ON(!PageSlab(page));
		__ClearPageSlab(page);
		page++;
	}
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	free_pages((unsigned long)addr, cachep->gfporder);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
	struct kmem_cache *cachep = slab_rcu->cachep;

	kmem_freepages(cachep, slab_rcu->addr);
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slab_rcu);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = obj_size(cachep);

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}
#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = obj_size(cachep);
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	printk(KERN_ERR "%03x:", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
		printk(" %02x", (unsigned char)data[offset + i]);
	}
	printk("\n");

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
}
#endif
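
/*
 * Example of the single-bit test above (illustrative): if a poisoned
 * byte reads back as 0x6f instead of POISON_FREE (0x6b), then
 * error = 0x6f ^ 0x6b = 0x04, and (error & (error - 1)) == 0 proves
 * that exactly one bit flipped - a classic bad-RAM signature.
 */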

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>]",
			*dbg_userword(cachep, objp));
		print_symbol("(%s)",
				(unsigned long)*dbg_userword(cachep, objp));
		printk("\n");
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = obj_size(cachep);
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = obj_size(cachep);

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption: %s start=%p, len=%d\n",
					cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct slab *slabp = virt_to_slab(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, slabp, objp);
		if (objnr) {
			objp = index_to_obj(cachep, slabp, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, slabp, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, slabp, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			if (cachep->buffer_size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->buffer_size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
}
#endif
								RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @slabp: slab pointer being destroyed
 *
 * Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling this, the slab must have been unlinked from the cache. The
 * cache-lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
	void *addr = slabp->s_mem - slabp->colouroff;

	slab_destroy_debugcheck(cachep, slabp);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct slab_rcu *slab_rcu;

		slab_rcu = (struct slab_rcu *)slabp;
		slab_rcu->cachep = cachep;
		slab_rcu->addr = addr;
		call_rcu(&slab_rcu->head, kmem_rcu_free);
	} else {
		kmem_freepages(cachep, addr);
		if (OFF_SLAB(cachep))
			kmem_cache_free(cachep->slabp_cache, slabp);
	}
}

static void __kmem_cache_destroy(struct kmem_cache *cachep)
{
	int i;
	struct kmem_list3 *l3;

	for_each_online_cpu(i)
		kfree(cachep->array[i]);

	/* NUMA: free the list3 structures */
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (l3) {
			kfree(l3->shared);
			free_alien_cache(l3->alien);
			kfree(l3);
		}
	}
	kmem_cache_free(&cache_cache, cachep);
}


/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent. For now, try to avoid using
 * high order pages for slabs. When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
			size_t size, size_t align, unsigned long flags)
{
	unsigned long offslab_limit;
	size_t left_over = 0;
	int gfporder;

	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
		unsigned int num;
		size_t remainder;

		cache_estimate(gfporder, size, align, flags, &remainder, &num);
		if (!num)
			continue;

		if (flags & CFLGS_OFF_SLAB) {
			/*
			 * Max number of objs-per-slab for caches which
			 * use off-slab slabs. Needed to avoid a possible
			 * looping condition in cache_grow().
			 */
			offslab_limit = size - sizeof(struct slab);
			offslab_limit /= sizeof(kmem_bufctl_t);

			if (num > offslab_limit)
				break;
		}

		/* Found something acceptable - save it away */
		cachep->num = num;
		cachep->gfporder = gfporder;
		left_over = remainder;

		/*
		 * A VFS-reclaimable slab tends to have most allocations
		 * as GFP_NOFS and we really don't want to have to be allocating
		 * higher-order pages when we are unable to shrink dcache.
		 */
		if (flags & SLAB_RECLAIM_ACCOUNT)
			break;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
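		 *
		 * As a worked illustration (numbers only for example): the
		 * fragmentation test below accepts an order once no more than
		 * 1/8th of the slab is wasted. With 4096-byte pages at order
		 * 0, a left_over of 512 bytes passes (512 * 8 <= 4096) while
		 * 600 bytes does not (4800 > 4096), so the loop would go on
		 * to try the next order.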
		 */
		if (gfporder >= slab_break_gfp_order)
			break;

		/*
		 * Acceptable internal fragmentation?
		 */
		if (left_over * 8 <= (PAGE_SIZE << gfporder))
			break;
	}
	return left_over;
}

static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
{
	if (g_cpucache_up == FULL)
		return enable_cpucache(cachep);

	if (g_cpucache_up == NONE) {
		/*
		 * Note: the first kmem_cache_create must create the cache
		 * that's used by kmalloc(24), otherwise the creation of
		 * further caches will BUG().
		 */
		cachep->array[smp_processor_id()] = &initarray_generic.cache;

		/*
		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
		 * the first cache, then we need to set up all its list3s,
		 * otherwise the creation of further caches will BUG().
		 */
		set_up_list3s(cachep, SIZE_AC);
		if (INDEX_AC == INDEX_L3)
			g_cpucache_up = PARTIAL_L3;
		else
			g_cpucache_up = PARTIAL_AC;
	} else {
		cachep->array[smp_processor_id()] =
			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

		if (g_cpucache_up == PARTIAL_AC) {
			set_up_list3s(cachep, SIZE_L3);
			g_cpucache_up = PARTIAL_L3;
		} else {
			int node;
			for_each_online_node(node) {
				cachep->nodelists[node] =
					kmalloc_node(sizeof(struct kmem_list3),
						     GFP_KERNEL, node);
				BUG_ON(!cachep->nodelists[node]);
				kmem_list3_init(cachep->nodelists[node]);
			}
		}
	}
	cachep->nodelists[numa_node_id()]->next_reap =
		jiffies + REAPTIMEOUT_LIST3 +
		((unsigned long)cachep) % REAPTIMEOUT_LIST3;

	cpu_cache_get(cachep)->avail = 0;
	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
	cpu_cache_get(cachep)->batchcount = 1;
	cpu_cache_get(cachep)->touched = 0;
	cachep->batchcount = 1;
	cachep->limit = BOOT_CPUCACHE_ENTRIES;
	return 0;
}

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
 * therefore applications must manage it themselves.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
	unsigned long flags, void (*ctor)(void *))
{
	size_t left_over, slab_size, ralign;
	struct kmem_cache *cachep = NULL, *pc;

	/*
	 * Sanity checks... these are all serious usage bugs.
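	 *
	 * For illustration only: a minimal call that passes these checks,
	 * assuming a caller-defined struct foo, would look like
	 *
	 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
	 *				      0, SLAB_HWCACHE_ALIGN, NULL);
	 *
	 * i.e. a valid name, a word-sized-or-larger object, and no call
	 * from interrupt context.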
2168 */ 2169 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2170 size > KMALLOC_MAX_SIZE) { 2171 printk(KERN_ERR "%s: Early error in slab %s\n", __func__, 2172 name); 2173 BUG(); 2174 } 2175 2176 /* 2177 * We use cache_chain_mutex to ensure a consistent view of 2178 * cpu_online_mask as well. Please see cpuup_callback 2179 */ 2180 get_online_cpus(); 2181 mutex_lock(&cache_chain_mutex); 2182 2183 list_for_each_entry(pc, &cache_chain, next) { 2184 char tmp; 2185 int res; 2186 2187 /* 2188 * This happens when the module gets unloaded and doesn't 2189 * destroy its slab cache and no-one else reuses the vmalloc 2190 * area of the module. Print a warning. 2191 */ 2192 res = probe_kernel_address(pc->name, tmp); 2193 if (res) { 2194 printk(KERN_ERR 2195 "SLAB: cache with size %d has lost its name\n", 2196 pc->buffer_size); 2197 continue; 2198 } 2199 2200 if (!strcmp(pc->name, name)) { 2201 printk(KERN_ERR 2202 "kmem_cache_create: duplicate cache %s\n", name); 2203 dump_stack(); 2204 goto oops; 2205 } 2206 } 2207 2208#if DEBUG 2209 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 2210#if FORCED_DEBUG 2211 /* 2212 * Enable redzoning and last user accounting, except for caches with 2213 * large objects, if the increased size would increase the object size 2214 * above the next power of two: caches with object sizes just above a 2215 * power of two have a significant amount of internal fragmentation. 2216 */ 2217 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 2218 2 * sizeof(unsigned long long))) 2219 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2220 if (!(flags & SLAB_DESTROY_BY_RCU)) 2221 flags |= SLAB_POISON; 2222#endif 2223 if (flags & SLAB_DESTROY_BY_RCU) 2224 BUG_ON(flags & SLAB_POISON); 2225#endif 2226 /* 2227 * Always checks flags, a caller might be expecting debug support which 2228 * isn't available. 2229 */ 2230 BUG_ON(flags & ~CREATE_MASK); 2231 2232 /* 2233 * Check that size is in terms of words. This is needed to avoid 2234 * unaligned accesses for some archs when redzoning is used, and makes 2235 * sure any on-slab bufctl's are also correctly aligned. 2236 */ 2237 if (size & (BYTES_PER_WORD - 1)) { 2238 size += (BYTES_PER_WORD - 1); 2239 size &= ~(BYTES_PER_WORD - 1); 2240 } 2241 2242 /* calculate the final buffer alignment: */ 2243 2244 /* 1) arch recommendation: can be overridden for debug */ 2245 if (flags & SLAB_HWCACHE_ALIGN) { 2246 /* 2247 * Default alignment: as specified by the arch code. Except if 2248 * an object is really small, then squeeze multiple objects into 2249 * one cacheline. 2250 */ 2251 ralign = cache_line_size(); 2252 while (size <= ralign / 2) 2253 ralign /= 2; 2254 } else { 2255 ralign = BYTES_PER_WORD; 2256 } 2257 2258 /* 2259 * Redzoning and user store require word alignment or possibly larger. 2260 * Note this will be overridden by architecture or caller mandated 2261 * alignment if either is greater than BYTES_PER_WORD. 2262 */ 2263 if (flags & SLAB_STORE_USER) 2264 ralign = BYTES_PER_WORD; 2265 2266 if (flags & SLAB_RED_ZONE) { 2267 ralign = REDZONE_ALIGN; 2268 /* If redzoning, ensure that the second redzone is suitably 2269 * aligned, by adjusting the object size accordingly. 
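		 * The round-up below is the usual power-of-two alignment
		 * trick: for example, with a REDZONE_ALIGN of 8, a size of
		 * 20 becomes (20 + 7) & ~7 = 24.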
*/ 2270 size += REDZONE_ALIGN - 1; 2271 size &= ~(REDZONE_ALIGN - 1); 2272 } 2273 2274 /* 2) arch mandated alignment */ 2275 if (ralign < ARCH_SLAB_MINALIGN) { 2276 ralign = ARCH_SLAB_MINALIGN; 2277 } 2278 /* 3) caller mandated alignment */ 2279 if (ralign < align) { 2280 ralign = align; 2281 } 2282 /* disable debug if necessary */ 2283 if (ralign > __alignof__(unsigned long long)) 2284 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2285 /* 2286 * 4) Store it. 2287 */ 2288 align = ralign; 2289 2290 /* Get cache's description obj. */ 2291 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); 2292 if (!cachep) 2293 goto oops; 2294 2295#if DEBUG 2296 cachep->obj_size = size; 2297 2298 /* 2299 * Both debugging options require word-alignment which is calculated 2300 * into align above. 2301 */ 2302 if (flags & SLAB_RED_ZONE) { 2303 /* add space for red zone words */ 2304 cachep->obj_offset += sizeof(unsigned long long); 2305 size += 2 * sizeof(unsigned long long); 2306 } 2307 if (flags & SLAB_STORE_USER) { 2308 /* user store requires one word storage behind the end of 2309 * the real object. But if the second red zone needs to be 2310 * aligned to 64 bits, we must allow that much space. 2311 */ 2312 if (flags & SLAB_RED_ZONE) 2313 size += REDZONE_ALIGN; 2314 else 2315 size += BYTES_PER_WORD; 2316 } 2317#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2318 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2319 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { 2320 cachep->obj_offset += PAGE_SIZE - size; 2321 size = PAGE_SIZE; 2322 } 2323#endif 2324#endif 2325 2326 /* 2327 * Determine if the slab management is 'on' or 'off' slab. 2328 * (bootstrapping cannot cope with offslab caches so don't do 2329 * it too early on.) 2330 */ 2331 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) 2332 /* 2333 * Size is large, assume best to place the slab management obj 2334 * off-slab (should allow better packing of objs). 2335 */ 2336 flags |= CFLGS_OFF_SLAB; 2337 2338 size = ALIGN(size, align); 2339 2340 left_over = calculate_slab_order(cachep, size, align, flags); 2341 2342 if (!cachep->num) { 2343 printk(KERN_ERR 2344 "kmem_cache_create: couldn't create cache %s.\n", name); 2345 kmem_cache_free(&cache_cache, cachep); 2346 cachep = NULL; 2347 goto oops; 2348 } 2349 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2350 + sizeof(struct slab), align); 2351 2352 /* 2353 * If the slab has been placed off-slab, and we have enough space then 2354 * move it on-slab. This is at the expense of any extra colouring. 2355 */ 2356 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2357 flags &= ~CFLGS_OFF_SLAB; 2358 left_over -= slab_size; 2359 } 2360 2361 if (flags & CFLGS_OFF_SLAB) { 2362 /* really off slab. No need for manual alignment */ 2363 slab_size = 2364 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); 2365 } 2366 2367 cachep->colour_off = cache_line_size(); 2368 /* Offset must be a multiple of the alignment. 
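	 *
	 * The colour machinery staggers slab starts so that objects in
	 * different slabs do not all compete for the same cache lines; as
	 * an illustrative example, 192 bytes of left_over with a 64-byte
	 * colour_off yields 192 / 64 = 3 distinct slab colours below.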
*/ 2369 if (cachep->colour_off < align) 2370 cachep->colour_off = align; 2371 cachep->colour = left_over / cachep->colour_off; 2372 cachep->slab_size = slab_size; 2373 cachep->flags = flags; 2374 cachep->gfpflags = 0; 2375 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) 2376 cachep->gfpflags |= GFP_DMA; 2377 cachep->buffer_size = size; 2378 cachep->reciprocal_buffer_size = reciprocal_value(size); 2379 2380 if (flags & CFLGS_OFF_SLAB) { 2381 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2382 /* 2383 * This is a possibility for one of the malloc_sizes caches. 2384 * But since we go off slab only for object size greater than 2385 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, 2386 * this should not happen at all. 2387 * But leave a BUG_ON for some lucky dude. 2388 */ 2389 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); 2390 } 2391 cachep->ctor = ctor; 2392 cachep->name = name; 2393 2394 if (setup_cpu_cache(cachep)) { 2395 __kmem_cache_destroy(cachep); 2396 cachep = NULL; 2397 goto oops; 2398 } 2399 2400 /* cache setup completed, link it into the list */ 2401 list_add(&cachep->next, &cache_chain); 2402oops: 2403 if (!cachep && (flags & SLAB_PANIC)) 2404 panic("kmem_cache_create(): failed to create slab `%s'\n", 2405 name); 2406 mutex_unlock(&cache_chain_mutex); 2407 put_online_cpus(); 2408 return cachep; 2409} 2410EXPORT_SYMBOL(kmem_cache_create); 2411 2412#if DEBUG 2413static void check_irq_off(void) 2414{ 2415 BUG_ON(!irqs_disabled()); 2416} 2417 2418static void check_irq_on(void) 2419{ 2420 BUG_ON(irqs_disabled()); 2421} 2422 2423static void check_spinlock_acquired(struct kmem_cache *cachep) 2424{ 2425#ifdef CONFIG_SMP 2426 check_irq_off(); 2427 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2428#endif 2429} 2430 2431static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2432{ 2433#ifdef CONFIG_SMP 2434 check_irq_off(); 2435 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2436#endif 2437} 2438 2439#else 2440#define check_irq_off() do { } while(0) 2441#define check_irq_on() do { } while(0) 2442#define check_spinlock_acquired(x) do { } while(0) 2443#define check_spinlock_acquired_node(x, y) do { } while(0) 2444#endif 2445 2446static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2447 struct array_cache *ac, 2448 int force, int node); 2449 2450static void do_drain(void *arg) 2451{ 2452 struct kmem_cache *cachep = arg; 2453 struct array_cache *ac; 2454 int node = numa_node_id(); 2455 2456 check_irq_off(); 2457 ac = cpu_cache_get(cachep); 2458 spin_lock(&cachep->nodelists[node]->list_lock); 2459 free_block(cachep, ac->entry, ac->avail, node); 2460 spin_unlock(&cachep->nodelists[node]->list_lock); 2461 ac->avail = 0; 2462} 2463 2464static void drain_cpu_caches(struct kmem_cache *cachep) 2465{ 2466 struct kmem_list3 *l3; 2467 int node; 2468 2469 on_each_cpu(do_drain, cachep, 1); 2470 check_irq_on(); 2471 for_each_online_node(node) { 2472 l3 = cachep->nodelists[node]; 2473 if (l3 && l3->alien) 2474 drain_alien_cache(cachep, l3->alien); 2475 } 2476 2477 for_each_online_node(node) { 2478 l3 = cachep->nodelists[node]; 2479 if (l3) 2480 drain_array(cachep, l3, l3->shared, 1, node); 2481 } 2482} 2483 2484/* 2485 * Remove slabs from the list of free slabs. 2486 * Specify the number of slabs to drain in tofree. 2487 * 2488 * Returns the actual number of slabs released. 
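 *
 * For example, __cache_shrink() below passes l3->free_objects as tofree
 * to release every free slab on a node, while cache_reap() asks for only
 * a small fraction of free_limit per pass so that reaping stays cheap.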
 */
static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree)
{
	struct list_head *p;
	int nr_freed;
	struct slab *slabp;

	nr_freed = 0;
	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {

		spin_lock_irq(&l3->list_lock);
		p = l3->slabs_free.prev;
		if (p == &l3->slabs_free) {
			spin_unlock_irq(&l3->list_lock);
			goto out;
		}

		slabp = list_entry(p, struct slab, list);
#if DEBUG
		BUG_ON(slabp->inuse);
#endif
		list_del(&slabp->list);
		/*
		 * Safe to drop the lock. The slab is no longer linked
		 * to the cache.
		 */
		l3->free_objects -= cache->num;
		spin_unlock_irq(&l3->list_lock);
		slab_destroy(cache, slabp);
		nr_freed++;
	}
out:
	return nr_freed;
}

/* Called with cache_chain_mutex held to protect against cpu hotplug */
static int __cache_shrink(struct kmem_cache *cachep)
{
	int ret = 0, i = 0;
	struct kmem_list3 *l3;

	drain_cpu_caches(cachep);

	check_irq_on();
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (!l3)
			continue;

		drain_freelist(cachep, l3, l3->free_objects);

		ret += !list_empty(&l3->slabs_full) ||
			!list_empty(&l3->slabs_partial);
	}
	return (ret ? 1 : 0);
}

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;
	BUG_ON(!cachep || in_interrupt());

	get_online_cpus();
	mutex_lock(&cache_chain_mutex);
	ret = __cache_shrink(cachep);
	mutex_unlock(&cache_chain_mutex);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

/**
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
 * Remove a &struct kmem_cache object from the slab cache.
 *
 * It is expected this function will be called by a module when it is
 * unloaded. This will remove the cache completely, and avoid a duplicate
 * cache being allocated each time a module is loaded and unloaded, if the
 * module doesn't have persistent in-kernel storage across loads and unloads.
 *
 * The cache must be empty before calling this function.
 *
 * The caller must guarantee that no one will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
void kmem_cache_destroy(struct kmem_cache *cachep)
{
	BUG_ON(!cachep || in_interrupt());

	/* Find the cache in the chain of caches. */
	get_online_cpus();
	mutex_lock(&cache_chain_mutex);
	/*
	 * the chain is never empty, cache_cache is never destroyed
	 */
	list_del(&cachep->next);
	if (__cache_shrink(cachep)) {
		slab_error(cachep, "Can't free all objects");
		list_add(&cachep->next, &cache_chain);
		mutex_unlock(&cache_chain_mutex);
		put_online_cpus();
		return;
	}

	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
		synchronize_rcu();

	__kmem_cache_destroy(cachep);
	mutex_unlock(&cache_chain_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/*
 * Get the memory for a slab management obj.
 * For a slab cache when the slab descriptor is off-slab, the slab
 * descriptors always come from the malloc_sizes caches. The slab
 * descriptor cannot come from the same cache which is being created
 * because, when we search for an appropriate cache for these descriptors
 * in kmem_cache_create(), we search through the malloc_sizes array. If we
 * are creating a malloc_sizes cache here it would not be visible to
 * kmem_find_general_cachep() until the initialization is complete.
 * Hence we cannot have slabp_cache the same as the original cache.
 */
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
				   int colour_off, gfp_t local_flags,
				   int nodeid)
{
	struct slab *slabp;

	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
					      local_flags, nodeid);
		if (!slabp)
			return NULL;
		/*
		 * If the first object in the slab is leaked (it's allocated
		 * but no one has a reference to it), we want to make sure
		 * kmemleak does not treat the ->s_mem pointer as a reference
		 * to the object. Otherwise we will not report the leak.
		 */
		kmemleak_scan_area(slabp, offsetof(struct slab, list),
				   sizeof(struct list_head), local_flags);
	} else {
		slabp = objp + colour_off;
		colour_off += cachep->slab_size;
	}
	slabp->inuse = 0;
	slabp->colouroff = colour_off;
	slabp->s_mem = objp + colour_off;
	slabp->nodeid = nodeid;
	slabp->free = 0;
	return slabp;
}

static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
	return (kmem_bufctl_t *) (slabp + 1);
}

static void cache_init_objs(struct kmem_cache *cachep,
			    struct slab *slabp)
{
	int i;

	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, slabp, i);
#if DEBUG
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON)
			poison_obj(cachep, objp, POISON_FREE);
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, objp) = NULL;

		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
		}
		/*
		 * Constructors are not allowed to allocate memory from the same
		 * cache which they are a constructor for. Otherwise, deadlock.
		 * They must also be threaded.
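		 *
		 * A well-behaved constructor (hypothetical sketch, with a
		 * caller-defined struct foo) therefore only initializes the
		 * object passed to it, e.g.:
		 *
		 *	static void foo_ctor(void *obj)
		 *	{
		 *		struct foo *f = obj;
		 *
		 *		spin_lock_init(&f->lock);
		 *		INIT_LIST_HEAD(&f->list);
		 *	}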
2682 */ 2683 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2684 cachep->ctor(objp + obj_offset(cachep)); 2685 2686 if (cachep->flags & SLAB_RED_ZONE) { 2687 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2688 slab_error(cachep, "constructor overwrote the" 2689 " end of an object"); 2690 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2691 slab_error(cachep, "constructor overwrote the" 2692 " start of an object"); 2693 } 2694 if ((cachep->buffer_size % PAGE_SIZE) == 0 && 2695 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2696 kernel_map_pages(virt_to_page(objp), 2697 cachep->buffer_size / PAGE_SIZE, 0); 2698#else 2699 if (cachep->ctor) 2700 cachep->ctor(objp); 2701#endif 2702 slab_bufctl(slabp)[i] = i + 1; 2703 } 2704 slab_bufctl(slabp)[i - 1] = BUFCTL_END; 2705} 2706 2707static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2708{ 2709 if (CONFIG_ZONE_DMA_FLAG) { 2710 if (flags & GFP_DMA) 2711 BUG_ON(!(cachep->gfpflags & GFP_DMA)); 2712 else 2713 BUG_ON(cachep->gfpflags & GFP_DMA); 2714 } 2715} 2716 2717static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2718 int nodeid) 2719{ 2720 void *objp = index_to_obj(cachep, slabp, slabp->free); 2721 kmem_bufctl_t next; 2722 2723 slabp->inuse++; 2724 next = slab_bufctl(slabp)[slabp->free]; 2725#if DEBUG 2726 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2727 WARN_ON(slabp->nodeid != nodeid); 2728#endif 2729 slabp->free = next; 2730 2731 return objp; 2732} 2733 2734static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2735 void *objp, int nodeid) 2736{ 2737 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2738 2739#if DEBUG 2740 /* Verify that the slab belongs to the intended node */ 2741 WARN_ON(slabp->nodeid != nodeid); 2742 2743 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2744 printk(KERN_ERR "slab: double free detected in cache " 2745 "'%s', objp %p\n", cachep->name, objp); 2746 BUG(); 2747 } 2748#endif 2749 slab_bufctl(slabp)[objnr] = slabp->free; 2750 slabp->free = objnr; 2751 slabp->inuse--; 2752} 2753 2754/* 2755 * Map pages beginning at addr to the given cache and slab. This is required 2756 * for the slab allocator to be able to lookup the cache and slab of a 2757 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 2758 */ 2759static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2760 void *addr) 2761{ 2762 int nr_pages; 2763 struct page *page; 2764 2765 page = virt_to_page(addr); 2766 2767 nr_pages = 1; 2768 if (likely(!PageCompound(page))) 2769 nr_pages <<= cache->gfporder; 2770 2771 do { 2772 page_set_cache(page, cache); 2773 page_set_slab(page, slab); 2774 page++; 2775 } while (--nr_pages); 2776} 2777 2778/* 2779 * Grow (by 1) the number of slabs within a cache. This is called by 2780 * kmem_cache_alloc() when there are no active objs left in a cache. 2781 */ 2782static int cache_grow(struct kmem_cache *cachep, 2783 gfp_t flags, int nodeid, void *objp) 2784{ 2785 struct slab *slabp; 2786 size_t offset; 2787 gfp_t local_flags; 2788 struct kmem_list3 *l3; 2789 2790 /* 2791 * Be lazy and only check for valid flags here, keeping it out of the 2792 * critical path in kmem_cache_alloc(). 
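	 *
	 * (For instance, a request such as kmalloc(size, GFP_HIGHMEM) is a
	 * bug for a slab allocation - highmem pages have no permanent kernel
	 * mapping to hand back - and should trip the BUG_ON() below.)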
	 */
	BUG_ON(flags & GFP_SLAB_BUG_MASK);
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

	/* Take the l3 list lock to change the colour_next on this node */
	check_irq_off();
	l3 = cachep->nodelists[nodeid];
	spin_lock(&l3->list_lock);

	/* Get colour for the slab, and calculate the next value. */
	offset = l3->colour_next;
	l3->colour_next++;
	if (l3->colour_next >= cachep->colour)
		l3->colour_next = 0;
	spin_unlock(&l3->list_lock);

	offset *= cachep->colour_off;

	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

	/*
	 * Get mem for the objs. Attempt to allocate a physical page from
	 * 'nodeid'.
	 */
	if (!objp)
		objp = kmem_getpages(cachep, local_flags, nodeid);
	if (!objp)
		goto failed;

	/* Get slab management. */
	slabp = alloc_slabmgmt(cachep, objp, offset,
			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
	if (!slabp)
		goto opps1;

	slab_map_pages(cachep, slabp, objp);

	cache_init_objs(cachep, slabp);

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
	spin_lock(&l3->list_lock);

	/* Make slab active. */
	list_add_tail(&slabp->list, &(l3->slabs_free));
	STATS_INC_GROWN(cachep);
	l3->free_objects += cachep->num;
	spin_unlock(&l3->list_lock);
	return 1;
opps1:
	kmem_freepages(cachep, objp);
failed:
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}

#if DEBUG

/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
static void kfree_debugcheck(const void *objp)
{
	if (!virt_addr_valid(objp)) {
		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
		       (unsigned long)objp);
		BUG();
	}
}

static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
	unsigned long long redzone1, redzone2;

	redzone1 = *dbg_redzone1(cache, obj);
	redzone2 = *dbg_redzone2(cache, obj);

	/*
	 * Redzone is ok.
2885 */ 2886 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2887 return; 2888 2889 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2890 slab_error(cache, "double free detected"); 2891 else 2892 slab_error(cache, "memory outside object was overwritten"); 2893 2894 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", 2895 obj, redzone1, redzone2); 2896} 2897 2898static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2899 void *caller) 2900{ 2901 struct page *page; 2902 unsigned int objnr; 2903 struct slab *slabp; 2904 2905 BUG_ON(virt_to_cache(objp) != cachep); 2906 2907 objp -= obj_offset(cachep); 2908 kfree_debugcheck(objp); 2909 page = virt_to_head_page(objp); 2910 2911 slabp = page_get_slab(page); 2912 2913 if (cachep->flags & SLAB_RED_ZONE) { 2914 verify_redzone_free(cachep, objp); 2915 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2916 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2917 } 2918 if (cachep->flags & SLAB_STORE_USER) 2919 *dbg_userword(cachep, objp) = caller; 2920 2921 objnr = obj_to_index(cachep, slabp, objp); 2922 2923 BUG_ON(objnr >= cachep->num); 2924 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2925 2926#ifdef CONFIG_DEBUG_SLAB_LEAK 2927 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2928#endif 2929 if (cachep->flags & SLAB_POISON) { 2930#ifdef CONFIG_DEBUG_PAGEALLOC 2931 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2932 store_stackinfo(cachep, objp, (unsigned long)caller); 2933 kernel_map_pages(virt_to_page(objp), 2934 cachep->buffer_size / PAGE_SIZE, 0); 2935 } else { 2936 poison_obj(cachep, objp, POISON_FREE); 2937 } 2938#else 2939 poison_obj(cachep, objp, POISON_FREE); 2940#endif 2941 } 2942 return objp; 2943} 2944 2945static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) 2946{ 2947 kmem_bufctl_t i; 2948 int entries = 0; 2949 2950 /* Check slab's freelist to see if this obj is there. */ 2951 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2952 entries++; 2953 if (entries > cachep->num || i >= cachep->num) 2954 goto bad; 2955 } 2956 if (entries != cachep->num - slabp->inuse) { 2957bad: 2958 printk(KERN_ERR "slab: Internal list corruption detected in " 2959 "cache '%s'(%d), slabp %p(%d). Hexdump:\n", 2960 cachep->name, cachep->num, slabp, slabp->inuse); 2961 for (i = 0; 2962 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); 2963 i++) { 2964 if (i % 16 == 0) 2965 printk("\n%03x:", i); 2966 printk(" %02x", ((unsigned char *)slabp)[i]); 2967 } 2968 printk("\n"); 2969 BUG(); 2970 } 2971} 2972#else 2973#define kfree_debugcheck(x) do { } while(0) 2974#define cache_free_debugcheck(x,objp,z) (objp) 2975#define check_slabp(x,y) do { } while(0) 2976#endif 2977 2978static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2979{ 2980 int batchcount; 2981 struct kmem_list3 *l3; 2982 struct array_cache *ac; 2983 int node; 2984 2985retry: 2986 check_irq_off(); 2987 node = numa_node_id(); 2988 ac = cpu_cache_get(cachep); 2989 batchcount = ac->batchcount; 2990 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2991 /* 2992 * If there was little recent activity on this cache, then 2993 * perform only a partial refill. Otherwise we could generate 2994 * refill bouncing. 
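		 *
		 * E.g. (illustrative numbers): with a tuned batchcount of 60
		 * on an otherwise idle cache, only BATCHREFILL_LIMIT objects
		 * are pulled in below, so a lone allocation does not drag a
		 * large batch of cache-cold objects into the head array.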
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
	l3 = cachep->nodelists[node];

	BUG_ON(ac->avail > 0 || !l3);
	spin_lock(&l3->list_lock);

	/* See if we can refill from the shared array */
	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
		goto alloc_done;

	while (batchcount > 0) {
		struct list_head *entry;
		struct slab *slabp;
		/* Get the slab the allocation is to come from. */
		entry = l3->slabs_partial.next;
		if (entry == &l3->slabs_partial) {
			l3->free_touched = 1;
			entry = l3->slabs_free.next;
			if (entry == &l3->slabs_free)
				goto must_grow;
		}

		slabp = list_entry(entry, struct slab, list);
		check_slabp(cachep, slabp);
		check_spinlock_acquired(cachep);

		/*
		 * The slab was either on partial or free list so
		 * there must be at least one object available for
		 * allocation.
		 */
		BUG_ON(slabp->inuse >= cachep->num);

		while (slabp->inuse < cachep->num && batchcount--) {
			STATS_INC_ALLOCED(cachep);
			STATS_INC_ACTIVE(cachep);
			STATS_SET_HIGH(cachep);

			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
							      node);
		}
		check_slabp(cachep, slabp);

		/* move slabp to correct slabp list: */
		list_del(&slabp->list);
		if (slabp->free == BUFCTL_END)
			list_add(&slabp->list, &l3->slabs_full);
		else
			list_add(&slabp->list, &l3->slabs_partial);
	}

must_grow:
	l3->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&l3->list_lock);

	if (unlikely(!ac->avail)) {
		int x;
		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

		/* cache_grow can reenable interrupts, then ac could change. */
		ac = cpu_cache_get(cachep);
		if (!x && ac->avail == 0)	/* no objects in sight? abort */
			return NULL;

		if (!ac->avail)		/* objects refilled by interrupt?
*/ 3063 goto retry; 3064 } 3065 ac->touched = 1; 3066 return ac->entry[--ac->avail]; 3067} 3068 3069static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 3070 gfp_t flags) 3071{ 3072 might_sleep_if(flags & __GFP_WAIT); 3073#if DEBUG 3074 kmem_flagcheck(cachep, flags); 3075#endif 3076} 3077 3078#if DEBUG 3079static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3080 gfp_t flags, void *objp, void *caller) 3081{ 3082 if (!objp) 3083 return objp; 3084 if (cachep->flags & SLAB_POISON) { 3085#ifdef CONFIG_DEBUG_PAGEALLOC 3086 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 3087 kernel_map_pages(virt_to_page(objp), 3088 cachep->buffer_size / PAGE_SIZE, 1); 3089 else 3090 check_poison_obj(cachep, objp); 3091#else 3092 check_poison_obj(cachep, objp); 3093#endif 3094 poison_obj(cachep, objp, POISON_INUSE); 3095 } 3096 if (cachep->flags & SLAB_STORE_USER) 3097 *dbg_userword(cachep, objp) = caller; 3098 3099 if (cachep->flags & SLAB_RED_ZONE) { 3100 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3101 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3102 slab_error(cachep, "double free, or memory outside" 3103 " object was overwritten"); 3104 printk(KERN_ERR 3105 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 3106 objp, *dbg_redzone1(cachep, objp), 3107 *dbg_redzone2(cachep, objp)); 3108 } 3109 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3110 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3111 } 3112#ifdef CONFIG_DEBUG_SLAB_LEAK 3113 { 3114 struct slab *slabp; 3115 unsigned objnr; 3116 3117 slabp = page_get_slab(virt_to_head_page(objp)); 3118 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; 3119 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; 3120 } 3121#endif 3122 objp += obj_offset(cachep); 3123 if (cachep->ctor && cachep->flags & SLAB_POISON) 3124 cachep->ctor(objp); 3125#if ARCH_SLAB_MINALIGN 3126 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3127 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3128 objp, ARCH_SLAB_MINALIGN); 3129 } 3130#endif 3131 return objp; 3132} 3133#else 3134#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3135#endif 3136 3137static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) 3138{ 3139 if (cachep == &cache_cache) 3140 return false; 3141 3142 return should_failslab(obj_size(cachep), flags); 3143} 3144 3145static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3146{ 3147 void *objp; 3148 struct array_cache *ac; 3149 3150 check_irq_off(); 3151 3152 ac = cpu_cache_get(cachep); 3153 if (likely(ac->avail)) { 3154 STATS_INC_ALLOCHIT(cachep); 3155 ac->touched = 1; 3156 objp = ac->entry[--ac->avail]; 3157 } else { 3158 STATS_INC_ALLOCMISS(cachep); 3159 objp = cache_alloc_refill(cachep, flags); 3160 } 3161 /* 3162 * To avoid a false negative, if an object that is in one of the 3163 * per-CPU caches is leaked, we need to make sure kmemleak doesn't 3164 * treat the array pointers as a reference to the object. 3165 */ 3166 kmemleak_erase(&ac->entry[ac->avail]); 3167 return objp; 3168} 3169 3170#ifdef CONFIG_NUMA 3171/* 3172 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. 3173 * 3174 * If we are in_interrupt, then process context, including cpusets and 3175 * mempolicy, may not apply and should not be used for allocation policy. 
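 *
 * For example, a task running with an MPOL_INTERLEAVE mempolicy may be
 * pointed at a round-robin node by slab_node() below, in which case the
 * allocation is redirected to that node rather than the local one.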
3176 */ 3177static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3178{ 3179 int nid_alloc, nid_here; 3180 3181 if (in_interrupt() || (flags & __GFP_THISNODE)) 3182 return NULL; 3183 nid_alloc = nid_here = numa_node_id(); 3184 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3185 nid_alloc = cpuset_mem_spread_node(); 3186 else if (current->mempolicy) 3187 nid_alloc = slab_node(current->mempolicy); 3188 if (nid_alloc != nid_here) 3189 return ____cache_alloc_node(cachep, flags, nid_alloc); 3190 return NULL; 3191} 3192 3193/* 3194 * Fallback function if there was no memory available and no objects on a 3195 * certain node and fall back is permitted. First we scan all the 3196 * available nodelists for available objects. If that fails then we 3197 * perform an allocation without specifying a node. This allows the page 3198 * allocator to do its reclaim / fallback magic. We then insert the 3199 * slab into the proper nodelist and then allocate from it. 3200 */ 3201static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3202{ 3203 struct zonelist *zonelist; 3204 gfp_t local_flags; 3205 struct zoneref *z; 3206 struct zone *zone; 3207 enum zone_type high_zoneidx = gfp_zone(flags); 3208 void *obj = NULL; 3209 int nid; 3210 3211 if (flags & __GFP_THISNODE) 3212 return NULL; 3213 3214 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 3215 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); 3216 3217retry: 3218 /* 3219 * Look through allowed nodes for objects available 3220 * from existing per node queues. 3221 */ 3222 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 3223 nid = zone_to_nid(zone); 3224 3225 if (cpuset_zone_allowed_hardwall(zone, flags) && 3226 cache->nodelists[nid] && 3227 cache->nodelists[nid]->free_objects) { 3228 obj = ____cache_alloc_node(cache, 3229 flags | GFP_THISNODE, nid); 3230 if (obj) 3231 break; 3232 } 3233 } 3234 3235 if (!obj) { 3236 /* 3237 * This allocation will be performed within the constraints 3238 * of the current cpuset / memory policy requirements. 3239 * We may trigger various forms of reclaim on the allowed 3240 * set and go into memory reserves if necessary. 3241 */ 3242 if (local_flags & __GFP_WAIT) 3243 local_irq_enable(); 3244 kmem_flagcheck(cache, flags); 3245 obj = kmem_getpages(cache, local_flags, -1); 3246 if (local_flags & __GFP_WAIT) 3247 local_irq_disable(); 3248 if (obj) { 3249 /* 3250 * Insert into the appropriate per node queues 3251 */ 3252 nid = page_to_nid(virt_to_page(obj)); 3253 if (cache_grow(cache, flags, nid, obj)) { 3254 obj = ____cache_alloc_node(cache, 3255 flags | GFP_THISNODE, nid); 3256 if (!obj) 3257 /* 3258 * Another processor may allocate the 3259 * objects in the slab since we are 3260 * not holding any locks. 
					 */
					goto retry;
			} else {
				/* cache_grow already freed obj */
				obj = NULL;
			}
		}
	}
	return obj;
}

/*
 * An interface to enable slab creation on nodeid
 */
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
				int nodeid)
{
	struct list_head *entry;
	struct slab *slabp;
	struct kmem_list3 *l3;
	void *obj;
	int x;

	l3 = cachep->nodelists[nodeid];
	BUG_ON(!l3);

retry:
	check_irq_off();
	spin_lock(&l3->list_lock);
	entry = l3->slabs_partial.next;
	if (entry == &l3->slabs_partial) {
		l3->free_touched = 1;
		entry = l3->slabs_free.next;
		if (entry == &l3->slabs_free)
			goto must_grow;
	}

	slabp = list_entry(entry, struct slab, list);
	check_spinlock_acquired_node(cachep, nodeid);
	check_slabp(cachep, slabp);

	STATS_INC_NODEALLOCS(cachep);
	STATS_INC_ACTIVE(cachep);
	STATS_SET_HIGH(cachep);

	BUG_ON(slabp->inuse == cachep->num);

	obj = slab_get_obj(cachep, slabp, nodeid);
	check_slabp(cachep, slabp);
	l3->free_objects--;
	/* move slabp to correct slabp list: */
	list_del(&slabp->list);

	if (slabp->free == BUFCTL_END)
		list_add(&slabp->list, &l3->slabs_full);
	else
		list_add(&slabp->list, &l3->slabs_partial);

	spin_unlock(&l3->list_lock);
	goto done;

must_grow:
	spin_unlock(&l3->list_lock);
	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
	if (x)
		goto retry;

	return fallback_alloc(cachep, flags);

done:
	return obj;
}

/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 * @caller: return address of caller, used for debug information
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
static __always_inline void *
__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
		   void *caller)
{
	unsigned long save_flags;
	void *ptr;

	lockdep_trace_alloc(flags);

	if (slab_should_failslab(cachep, flags))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);

	if (unlikely(nodeid == -1))
		nodeid = numa_node_id();

	if (unlikely(!cachep->nodelists[nodeid])) {
		/* Node not bootstrapped yet */
		ptr = fallback_alloc(cachep, flags);
		goto out;
	}

	if (nodeid == numa_node_id()) {
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
		ptr = ____cache_alloc(cachep, flags);
		if (ptr)
			goto out;
	}
	/* ____cache_alloc_node can fall back to other nodes */
	ptr = ____cache_alloc_node(cachep, flags, nodeid);
  out:
	local_irq_restore(save_flags);
	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
				 flags);

	if (unlikely((flags & __GFP_ZERO) && ptr))
		memset(ptr, 0, obj_size(cachep));

	return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
		objp = alternate_node_alloc(cache, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cache, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_node_id());

  out:
	return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
	unsigned long save_flags;
	void *objp;

	lockdep_trace_alloc(flags);

	if (slab_should_failslab(cachep, flags))
		return NULL;

	cache_alloc_debugcheck_before(cachep, flags);
	local_irq_save(save_flags);
	objp = __do_cache_alloc(cachep, flags);
	local_irq_restore(save_flags);
	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
				 flags);
	prefetchw(objp);

	if (unlikely((flags & __GFP_ZERO) && objp))
		memset(objp, 0, obj_size(cachep));

	return objp;
}

/*
 * Caller needs to acquire the correct kmem_list3's list_lock
 */
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
		       int node)
{
	int i;
	struct kmem_list3 *l3;

	for (i = 0; i < nr_objects; i++) {
		void *objp = objpp[i];
		struct slab *slabp;

		slabp = virt_to_slab(objp);
		l3 = cachep->nodelists[node];
		list_del(&slabp->list);
		check_spinlock_acquired_node(cachep, node);
		check_slabp(cachep, slabp);
		slab_put_obj(cachep, slabp, objp, node);
		STATS_DEC_ACTIVE(cachep);
		l3->free_objects++;
		check_slabp(cachep, slabp);

		/* fixup slab chains */
		if (slabp->inuse == 0) {
			if (l3->free_objects > l3->free_limit) {
				l3->free_objects -= cachep->num;
				/* No need to drop any previously held
				 * lock here, even if we have an off-slab slab
				 * descriptor it is guaranteed to come from
				 * a different cache, refer to comments before
				 * alloc_slabmgmt.
				 */
				slab_destroy(cachep, slabp);
			} else {
				list_add(&slabp->list, &l3->slabs_free);
			}
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&slabp->list, &l3->slabs_partial);
		}
	}
}

static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_list3 *l3;
	int node = numa_node_id();

	batchcount = ac->batchcount;
#if DEBUG
	BUG_ON(!batchcount || batchcount > ac->avail);
#endif
	check_irq_off();
	l3 = cachep->nodelists[node];
	spin_lock(&l3->list_lock);
	if (l3->shared) {
		struct array_cache *shared_array = l3->shared;
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
	{
		int i = 0;
		struct list_head *p;

		p = l3->slabs_free.next;
		while (p != &(l3->slabs_free)) {
			struct slab *slabp;

			slabp = list_entry(p, struct slab, list);
			BUG_ON(slabp->inuse);

			i++;
			p = p->next;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&l3->list_lock);
	ac->avail -= batchcount;
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released. Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));

	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This will avoid cache misses that happen while accessing slabp (which
	 * is per page memory reference) to get nodeid. Instead use a global
	 * variable to skip the call, which is most likely to be present in
	 * the cache.
	 */
	if (numa_platform && cache_free_alien(cachep, objp))
		return;

	if (likely(ac->avail < ac->limit)) {
		STATS_INC_FREEHIT(cachep);
		ac->entry[ac->avail++] = objp;
		return;
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
		ac->entry[ac->avail++] = objp;
	}
}

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache. The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));

	trace_kmem_cache_alloc(_RET_IP_, ret,
			       obj_size(cachep), cachep->buffer_size, flags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_KMEMTRACE
void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return __cache_alloc(cachep, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_notrace);
#endif

/**
 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
3613 * @cachep: the cache we're checking against 3614 * @ptr: pointer to validate 3615 * 3616 * This verifies that the untrusted pointer looks sane; 3617 * it is _not_ a guarantee that the pointer is actually 3618 * part of the slab cache in question, but it at least 3619 * validates that the pointer can be dereferenced and 3620 * looks half-way sane. 3621 * 3622 * Currently only used for dentry validation. 3623 */ 3624int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) 3625{ 3626 unsigned long addr = (unsigned long)ptr; 3627 unsigned long min_addr = PAGE_OFFSET; 3628 unsigned long align_mask = BYTES_PER_WORD - 1; 3629 unsigned long size = cachep->buffer_size; 3630 struct page *page; 3631 3632 if (unlikely(addr < min_addr)) 3633 goto out; 3634 if (unlikely(addr > (unsigned long)high_memory - size)) 3635 goto out; 3636 if (unlikely(addr & align_mask)) 3637 goto out; 3638 if (unlikely(!kern_addr_valid(addr))) 3639 goto out; 3640 if (unlikely(!kern_addr_valid(addr + size - 1))) 3641 goto out; 3642 page = virt_to_page(ptr); 3643 if (unlikely(!PageSlab(page))) 3644 goto out; 3645 if (unlikely(page_get_cache(page) != cachep)) 3646 goto out; 3647 return 1; 3648out: 3649 return 0; 3650} 3651 3652#ifdef CONFIG_NUMA 3653void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3654{ 3655 void *ret = __cache_alloc_node(cachep, flags, nodeid, 3656 __builtin_return_address(0)); 3657 3658 trace_kmem_cache_alloc_node(_RET_IP_, ret, 3659 obj_size(cachep), cachep->buffer_size, 3660 flags, nodeid); 3661 3662 return ret; 3663} 3664EXPORT_SYMBOL(kmem_cache_alloc_node); 3665 3666#ifdef CONFIG_KMEMTRACE 3667void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, 3668 gfp_t flags, 3669 int nodeid) 3670{ 3671 return __cache_alloc_node(cachep, flags, nodeid, 3672 __builtin_return_address(0)); 3673} 3674EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); 3675#endif 3676 3677static __always_inline void * 3678__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3679{ 3680 struct kmem_cache *cachep; 3681 void *ret; 3682 3683 cachep = kmem_find_general_cachep(size, flags); 3684 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3685 return cachep; 3686 ret = kmem_cache_alloc_node_notrace(cachep, flags, node); 3687 3688 trace_kmalloc_node((unsigned long) caller, ret, 3689 size, cachep->buffer_size, flags, node); 3690 3691 return ret; 3692} 3693 3694#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE) 3695void *__kmalloc_node(size_t size, gfp_t flags, int node) 3696{ 3697 return __do_kmalloc_node(size, flags, node, 3698 __builtin_return_address(0)); 3699} 3700EXPORT_SYMBOL(__kmalloc_node); 3701 3702void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3703 int node, unsigned long caller) 3704{ 3705 return __do_kmalloc_node(size, flags, node, (void *)caller); 3706} 3707EXPORT_SYMBOL(__kmalloc_node_track_caller); 3708#else 3709void *__kmalloc_node(size_t size, gfp_t flags, int node) 3710{ 3711 return __do_kmalloc_node(size, flags, node, NULL); 3712} 3713EXPORT_SYMBOL(__kmalloc_node); 3714#endif /* CONFIG_DEBUG_SLAB */ 3715#endif /* CONFIG_NUMA */ 3716 3717/** 3718 * __do_kmalloc - allocate memory 3719 * @size: how many bytes of memory are required. 3720 * @flags: the type of memory to allocate (see kmalloc). 
3721 * @caller: function caller for debug tracking of the caller 3722 */ 3723static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3724 void *caller) 3725{ 3726 struct kmem_cache *cachep; 3727 void *ret; 3728 3729 /* If you want to save a few bytes .text space: replace 3730 * __ with kmem_. 3731 * Then kmalloc uses the uninlined functions instead of the inline 3732 * functions. 3733 */ 3734 cachep = __find_general_cachep(size, flags); 3735 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3736 return cachep; 3737 ret = __cache_alloc(cachep, flags, caller); 3738 3739 trace_kmalloc((unsigned long) caller, ret, 3740 size, cachep->buffer_size, flags); 3741 3742 return ret; 3743} 3744 3745 3746#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE) 3747void *__kmalloc(size_t size, gfp_t flags) 3748{ 3749 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3750} 3751EXPORT_SYMBOL(__kmalloc); 3752 3753void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) 3754{ 3755 return __do_kmalloc(size, flags, (void *)caller); 3756} 3757EXPORT_SYMBOL(__kmalloc_track_caller); 3758 3759#else 3760void *__kmalloc(size_t size, gfp_t flags) 3761{ 3762 return __do_kmalloc(size, flags, NULL); 3763} 3764EXPORT_SYMBOL(__kmalloc); 3765#endif 3766 3767/** 3768 * kmem_cache_free - Deallocate an object 3769 * @cachep: The cache the allocation was from. 3770 * @objp: The previously allocated object. 3771 * 3772 * Free an object which was previously allocated from this 3773 * cache. 3774 */ 3775void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3776{ 3777 unsigned long flags; 3778 3779 local_irq_save(flags); 3780 debug_check_no_locks_freed(objp, obj_size(cachep)); 3781 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3782 debug_check_no_obj_freed(objp, obj_size(cachep)); 3783 __cache_free(cachep, objp); 3784 local_irq_restore(flags); 3785 3786 trace_kmem_cache_free(_RET_IP_, objp); 3787} 3788EXPORT_SYMBOL(kmem_cache_free); 3789 3790/** 3791 * kfree - free previously allocated memory 3792 * @objp: pointer returned by kmalloc. 3793 * 3794 * If @objp is NULL, no operation is performed. 3795 * 3796 * Don't free memory not originally allocated by kmalloc() 3797 * or you will run into trouble. 3798 */ 3799void kfree(const void *objp) 3800{ 3801 struct kmem_cache *c; 3802 unsigned long flags; 3803 3804 trace_kfree(_RET_IP_, objp); 3805 3806 if (unlikely(ZERO_OR_NULL_PTR(objp))) 3807 return; 3808 local_irq_save(flags); 3809 kfree_debugcheck(objp); 3810 c = virt_to_cache(objp); 3811 debug_check_no_locks_freed(objp, obj_size(c)); 3812 debug_check_no_obj_freed(objp, obj_size(c)); 3813 __cache_free(c, (void *)objp); 3814 local_irq_restore(flags); 3815} 3816EXPORT_SYMBOL(kfree); 3817 3818unsigned int kmem_cache_size(struct kmem_cache *cachep) 3819{ 3820 return obj_size(cachep); 3821} 3822EXPORT_SYMBOL(kmem_cache_size); 3823 3824const char *kmem_cache_name(struct kmem_cache *cachep) 3825{ 3826 return cachep->name; 3827} 3828EXPORT_SYMBOL_GPL(kmem_cache_name); 3829 3830/* 3831 * This initializes kmem_list3 or resizes various caches for all nodes. 
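 *
 * The per-node free_limit set below is
 * (1 + nr_cpus_node(node)) * batchcount + num. As an illustrative example,
 * a node with 4 cpus, a batchcount of 16 and 8 objects per slab may hold
 * up to (1 + 4) * 16 + 8 = 88 free objects before free_block() starts
 * destroying entirely free slabs.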
3832 */ 3833static int alloc_kmemlist(struct kmem_cache *cachep) 3834{ 3835 int node; 3836 struct kmem_list3 *l3; 3837 struct array_cache *new_shared; 3838 struct array_cache **new_alien = NULL; 3839 3840 for_each_online_node(node) { 3841 3842 if (use_alien_caches) { 3843 new_alien = alloc_alien_cache(node, cachep->limit); 3844 if (!new_alien) 3845 goto fail; 3846 } 3847 3848 new_shared = NULL; 3849 if (cachep->shared) { 3850 new_shared = alloc_arraycache(node, 3851 cachep->shared*cachep->batchcount, 3852 0xbaadf00d); 3853 if (!new_shared) { 3854 free_alien_cache(new_alien); 3855 goto fail; 3856 } 3857 } 3858 3859 l3 = cachep->nodelists[node]; 3860 if (l3) { 3861 struct array_cache *shared = l3->shared; 3862 3863 spin_lock_irq(&l3->list_lock); 3864 3865 if (shared) 3866 free_block(cachep, shared->entry, 3867 shared->avail, node); 3868 3869 l3->shared = new_shared; 3870 if (!l3->alien) { 3871 l3->alien = new_alien; 3872 new_alien = NULL; 3873 } 3874 l3->free_limit = (1 + nr_cpus_node(node)) * 3875 cachep->batchcount + cachep->num; 3876 spin_unlock_irq(&l3->list_lock); 3877 kfree(shared); 3878 free_alien_cache(new_alien); 3879 continue; 3880 } 3881 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); 3882 if (!l3) { 3883 free_alien_cache(new_alien); 3884 kfree(new_shared); 3885 goto fail; 3886 } 3887 3888 kmem_list3_init(l3); 3889 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3890 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3891 l3->shared = new_shared; 3892 l3->alien = new_alien; 3893 l3->free_limit = (1 + nr_cpus_node(node)) * 3894 cachep->batchcount + cachep->num; 3895 cachep->nodelists[node] = l3; 3896 } 3897 return 0; 3898 3899fail: 3900 if (!cachep->next.next) { 3901 /* Cache is not active yet. Roll back what we did */ 3902 node--; 3903 while (node >= 0) { 3904 if (cachep->nodelists[node]) { 3905 l3 = cachep->nodelists[node]; 3906 3907 kfree(l3->shared); 3908 free_alien_cache(l3->alien); 3909 kfree(l3); 3910 cachep->nodelists[node] = NULL; 3911 } 3912 node--; 3913 } 3914 } 3915 return -ENOMEM; 3916} 3917 3918struct ccupdate_struct { 3919 struct kmem_cache *cachep; 3920 struct array_cache *new[NR_CPUS]; 3921}; 3922 3923static void do_ccupdate_local(void *info) 3924{ 3925 struct ccupdate_struct *new = info; 3926 struct array_cache *old; 3927 3928 check_irq_off(); 3929 old = cpu_cache_get(new->cachep); 3930 3931 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3932 new->new[smp_processor_id()] = old; 3933} 3934 3935/* Always called with the cache_chain_mutex held */ 3936static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3937 int batchcount, int shared) 3938{ 3939 struct ccupdate_struct *new; 3940 int i; 3941 3942 new = kzalloc(sizeof(*new), GFP_KERNEL); 3943 if (!new) 3944 return -ENOMEM; 3945 3946 for_each_online_cpu(i) { 3947 new->new[i] = alloc_arraycache(cpu_to_node(i), limit, 3948 batchcount); 3949 if (!new->new[i]) { 3950 for (i--; i >= 0; i--) 3951 kfree(new->new[i]); 3952 kfree(new); 3953 return -ENOMEM; 3954 } 3955 } 3956 new->cachep = cachep; 3957 3958 on_each_cpu(do_ccupdate_local, (void *)new, 1); 3959 3960 check_irq_on(); 3961 cachep->batchcount = batchcount; 3962 cachep->limit = limit; 3963 cachep->shared = shared; 3964 3965 for_each_online_cpu(i) { 3966 struct array_cache *ccold = new->new[i]; 3967 if (!ccold) 3968 continue; 3969 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3970 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 3971 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 
/* Called with cache_chain_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep)
{
        int err;
        int limit, shared;

        /*
         * The head array serves three purposes:
         * - create a LIFO ordering, i.e. return objects that are cache-warm
         * - reduce the number of spinlock operations.
         * - reduce the number of linked list operations on the slab and
         *   bufctl chains: array operations are cheaper.
         * The numbers are guessed, we should auto-tune as described by
         * Bonwick.
         */
        if (cachep->buffer_size > 131072)
                limit = 1;
        else if (cachep->buffer_size > PAGE_SIZE)
                limit = 8;
        else if (cachep->buffer_size > 1024)
                limit = 24;
        else if (cachep->buffer_size > 256)
                limit = 54;
        else
                limit = 120;

        /*
         * CPU bound tasks (e.g. network routing) can exhibit cpu bound
         * allocation behaviour: most allocations happen on one cpu, most
         * frees on another. For these cases an efficient way of passing
         * objects between cpus is necessary. This is provided by a shared
         * array. The array replaces Bonwick's magazine layer.
         * On uniprocessor, it's functionally equivalent (but less efficient)
         * to a larger limit. Thus it is disabled by default.
         */
        shared = 0;
        if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
                shared = 8;

#if DEBUG
        /*
         * With debugging enabled, a large batchcount leads to excessively
         * long periods with local interrupts disabled. Limit the batchcount.
         */
        if (limit > 32)
                limit = 32;
#endif
        err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
        if (err)
                printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
                       cachep->name, -err);
        return err;
}

/*
 * Drain an array if it contains any elements, taking the l3 lock only if
 * necessary. Note that the l3 list_lock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
                 struct array_cache *ac, int force, int node)
{
        int tofree;

        if (!ac || !ac->avail)
                return;
        if (ac->touched && !force) {
                ac->touched = 0;
        } else {
                spin_lock_irq(&l3->list_lock);
                if (ac->avail) {
                        tofree = force ? ac->avail : (ac->limit + 4) / 5;
                        if (tofree > ac->avail)
                                tofree = (ac->avail + 1) / 2;
                        free_block(cachep, ac->entry, tofree, node);
                        ac->avail -= tofree;
                        memmove(ac->entry, &(ac->entry[tofree]),
                                sizeof(void *) * ac->avail);
                }
                spin_unlock_irq(&l3->list_lock);
        }
}
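/*
 * Worked example (illustrative): for a cache tuned to limit = 120, a
 * non-forced drain frees tofree = (120 + 4) / 5 = 24 objects per call,
 * i.e. roughly a fifth of the array per reap interval while the array
 * goes untouched. If only, say, 10 objects are available, the clamp
 * reduces that to (10 + 1) / 2 = 5, so an idle array shrinks gradually
 * instead of being emptied in one step.
 */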
/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
        struct kmem_cache *searchp;
        struct kmem_list3 *l3;
        int node = numa_node_id();
        struct delayed_work *work = to_delayed_work(w);

        if (!mutex_trylock(&cache_chain_mutex))
                /* Give up. Set up the next iteration. */
                goto out;

        list_for_each_entry(searchp, &cache_chain, next) {
                check_irq_on();

                /*
                 * We only take the l3 lock if absolutely necessary and we
                 * have established with reasonable certainty that
                 * we can do some work if the lock was obtained.
                 */
                l3 = searchp->nodelists[node];

                reap_alien(searchp, l3);

                drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);

                /*
                 * These are racy checks but it does not matter
                 * if we skip one check or scan twice.
                 */
                if (time_after(l3->next_reap, jiffies))
                        goto next;

                l3->next_reap = jiffies + REAPTIMEOUT_LIST3;

                drain_array(searchp, l3, l3->shared, 0, node);

                if (l3->free_touched)
                        l3->free_touched = 0;
                else {
                        int freed;

                        freed = drain_freelist(searchp, l3, (l3->free_limit +
                                5 * searchp->num - 1) / (5 * searchp->num));
                        STATS_ADD_REAPED(searchp, freed);
                }
next:
                cond_resched();
        }
        check_irq_on();
        mutex_unlock(&cache_chain_mutex);
        next_reap_node();
out:
        /* Set up the next iteration */
        schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}

#ifdef CONFIG_SLABINFO

static void print_slabinfo_header(struct seq_file *m)
{
        /*
         * Output format version, so at least we can change it
         * without _too_ many complaints.
         */
#if STATS
        seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
        seq_puts(m, "slabinfo - version: 2.1\n");
#endif
        seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
                 "<objperslab> <pagesperslab>");
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
                 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
        seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;

        mutex_lock(&cache_chain_mutex);
        if (!n)
                print_slabinfo_header(m);

        return seq_list_start(&cache_chain, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
        return seq_list_next(p, &cache_chain, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&cache_chain_mutex);
}
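/*
 * Illustrative only: with the header above and the seq_printf() formats
 * used in s_show() below, a /proc/slabinfo row resembles (the cache name
 * is real, the numbers are made up for the example):
 *
 *	dentry             25920  25920    192   20    1 : tunables  120   60    8 : slabdata   1296   1296      0
 *
 * i.e. active objects, total objects, object size, objects per slab and
 * pages per slab, followed by the current tunables and per-node slab data.
 */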
static int s_show(struct seq_file *m, void *p)
{
        struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
        struct slab *slabp;
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs = 0;
        unsigned long num_slabs, free_objects = 0, shared_avail = 0;
        const char *name;
        char *error = NULL;
        int node;
        struct kmem_list3 *l3;

        active_objs = 0;
        num_slabs = 0;
        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (!l3)
                        continue;

                check_irq_on();
                spin_lock_irq(&l3->list_lock);

                list_for_each_entry(slabp, &l3->slabs_full, list) {
                        if (slabp->inuse != cachep->num && !error)
                                error = "slabs_full accounting error";
                        active_objs += cachep->num;
                        active_slabs++;
                }
                list_for_each_entry(slabp, &l3->slabs_partial, list) {
                        if (slabp->inuse == cachep->num && !error)
                                error = "slabs_partial inuse accounting error";
                        if (!slabp->inuse && !error)
                                error = "slabs_partial/inuse accounting error";
                        active_objs += slabp->inuse;
                        active_slabs++;
                }
                list_for_each_entry(slabp, &l3->slabs_free, list) {
                        if (slabp->inuse && !error)
                                error = "slabs_free/inuse accounting error";
                        num_slabs++;
                }
                free_objects += l3->free_objects;
                if (l3->shared)
                        shared_avail += l3->shared->avail;

                spin_unlock_irq(&l3->list_lock);
        }
        num_slabs += active_slabs;
        num_objs = num_slabs * cachep->num;
        if (num_objs - active_objs != free_objects && !error)
                error = "free_objects accounting error";

        name = cachep->name;
        if (error)
                printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
                   name, active_objs, num_objs, cachep->buffer_size,
                   cachep->num, (1 << cachep->gfporder));
        seq_printf(m, " : tunables %4u %4u %4u",
                   cachep->limit, cachep->batchcount, cachep->shared);
        seq_printf(m, " : slabdata %6lu %6lu %6lu",
                   active_slabs, num_slabs, shared_avail);
#if STATS
        {                       /* list3 stats */
                unsigned long high = cachep->high_mark;
                unsigned long allocs = cachep->num_allocations;
                unsigned long grown = cachep->grown;
                unsigned long reaped = cachep->reaped;
                unsigned long errors = cachep->errors;
                unsigned long max_freeable = cachep->max_freeable;
                unsigned long node_allocs = cachep->node_allocs;
                unsigned long node_frees = cachep->node_frees;
                unsigned long overflows = cachep->node_overflow;

                seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
                           "%4lu %4lu %4lu %4lu %4lu",
                           allocs, high, grown, reaped, errors, max_freeable,
                           node_allocs, node_frees, overflows);
        }
        /* cpu stats */
        {
                unsigned long allochit = atomic_read(&cachep->allochit);
                unsigned long allocmiss = atomic_read(&cachep->allocmiss);
                unsigned long freehit = atomic_read(&cachep->freehit);
                unsigned long freemiss = atomic_read(&cachep->freemiss);

                seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
                           allochit, allocmiss, freehit, freemiss);
        }
#endif
        seq_putc(m, '\n');
        return 0;
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
        .show = s_show,
};

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos)
{
        char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
        int limit, batchcount, shared, res;
        struct kmem_cache *cachep;

        if (count > MAX_SLABINFO_WRITE)
                return -EINVAL;
        if (copy_from_user(&kbuf, buffer, count))
                return -EFAULT;
        kbuf[MAX_SLABINFO_WRITE] = '\0';

        tmp = strchr(kbuf, ' ');
        if (!tmp)
                return -EINVAL;
        *tmp = '\0';
        tmp++;
        if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
                return -EINVAL;

        /* Find the cache in the chain of caches. */
        mutex_lock(&cache_chain_mutex);
        res = -EINVAL;
        list_for_each_entry(cachep, &cache_chain, next) {
                if (!strcmp(cachep->name, kbuf)) {
                        if (limit < 1 || batchcount < 1 ||
                            batchcount > limit || shared < 0) {
                                res = 0;
                        } else {
                                res = do_tune_cpucache(cachep, limit,
                                                       batchcount, shared);
                        }
                        break;
                }
        }
        mutex_unlock(&cache_chain_mutex);
        if (res >= 0)
                res = count;
        return res;
}

static int slabinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
        .open           = slabinfo_open,
        .read           = seq_read,
        .write          = slabinfo_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
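/*
 * Illustrative usage (shell, not kernel code): the write handler above
 * expects "cache-name limit batchcount shared", so the tunables of a
 * cache can be changed at runtime with e.g.:
 *
 *	echo "dentry 256 128 8" > /proc/slabinfo
 *
 * Invalid tunables (limit < 1, batchcount < 1, batchcount > limit or
 * shared < 0) are silently ignored, while an unknown cache name makes
 * the write fail with -EINVAL.
 */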
#ifdef CONFIG_DEBUG_SLAB_LEAK

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&cache_chain_mutex);
        return seq_list_start(&cache_chain, *pos);
}

static inline int add_caller(unsigned long *n, unsigned long v)
{
        unsigned long *p;
        int l;
        if (!v)
                return 1;
        l = n[1];
        p = n + 2;
        while (l) {
                int i = l/2;
                unsigned long *q = p + 2 * i;
                if (*q == v) {
                        q[1]++;
                        return 1;
                }
                if (*q > v) {
                        l = i;
                } else {
                        p = q + 2;
                        l -= i + 1;
                }
        }
        if (++n[1] == n[0])
                return 0;
        memmove(p + 2, p,
                n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
        p[0] = v;
        p[1] = 1;
        return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
{
        void *p;
        int i;
        if (n[0] == n[1])
                return;
        for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
                if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
                        continue;
                if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
                        return;
        }
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        unsigned long offset, size;
        char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

        if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
                seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
                if (modname[0])
                        seq_printf(m, " [%s]", modname);
                return;
        }
#endif
        seq_printf(m, "%p", (void *)address);
}
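/*
 * Layout of the scratch buffer shared by the functions above and below
 * (a summary of what add_caller() and slabstats_open() already assume,
 * not an additional interface):
 *
 *	n[0]	capacity: how many (caller, count) pairs fit
 *	n[1]	number of pairs currently stored
 *	n[2..]	pairs sorted by caller address:
 *		n[2*i+2] = caller address, n[2*i+3] = hit count
 *
 * add_caller() binary-searches this pair array and returns 0 once the
 * buffer is full, which makes leaks_show() below retry the whole entry
 * with a buffer of twice the capacity.
 */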
static int leaks_show(struct seq_file *m, void *p)
{
        struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
        struct slab *slabp;
        struct kmem_list3 *l3;
        const char *name;
        unsigned long *n = m->private;
        int node;
        int i;

        if (!(cachep->flags & SLAB_STORE_USER))
                return 0;
        if (!(cachep->flags & SLAB_RED_ZONE))
                return 0;

        /* OK, we can do it */

        n[1] = 0;

        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
                if (!l3)
                        continue;

                check_irq_on();
                spin_lock_irq(&l3->list_lock);

                list_for_each_entry(slabp, &l3->slabs_full, list)
                        handle_slab(n, cachep, slabp);
                list_for_each_entry(slabp, &l3->slabs_partial, list)
                        handle_slab(n, cachep, slabp);
                spin_unlock_irq(&l3->list_lock);
        }
        name = cachep->name;
        if (n[0] == n[1]) {
                /* Increase the buffer size */
                mutex_unlock(&cache_chain_mutex);
                m->private = kzalloc(n[0] * 4 * sizeof(unsigned long),
                                     GFP_KERNEL);
                if (!m->private) {
                        /* Too bad, we are really out */
                        m->private = n;
                        mutex_lock(&cache_chain_mutex);
                        return -ENOMEM;
                }
                *(unsigned long *)m->private = n[0] * 2;
                kfree(n);
                mutex_lock(&cache_chain_mutex);
                /* Now make sure this entry will be retried */
                m->count = m->size;
                return 0;
        }
        for (i = 0; i < n[1]; i++) {
                seq_printf(m, "%s: %lu ", name, n[2*i+3]);
                show_symbol(m, n[2*i+2]);
                seq_putc(m, '\n');
        }

        return 0;
}

static const struct seq_operations slabstats_op = {
        .start = leaks_start,
        .next = s_next,
        .stop = s_stop,
        .show = leaks_show,
};

static int slabstats_open(struct inode *inode, struct file *file)
{
        unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
        int ret = -ENOMEM;
        if (n) {
                ret = seq_open(file, &slabstats_op);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        *n = PAGE_SIZE / (2 * sizeof(unsigned long));
                        m->private = n;
                        n = NULL;
                }
                kfree(n);
        }
        return ret;
}

static const struct file_operations proc_slabstats_operations = {
        .open           = slabstats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
#endif

static int __init slab_proc_init(void)
{
        proc_create("slabinfo", S_IWUSR | S_IRUGO, NULL,
                    &proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
        proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
        return 0;
}
module_init(slab_proc_init);
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
        BUG_ON(!objp);
        if (unlikely(objp == ZERO_SIZE_PTR))
                return 0;

        return obj_size(virt_to_cache(objp));
}
EXPORT_SYMBOL(ksize);
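/*
 * Illustrative usage (comment only): a caller may grow into the slack
 * that kmalloc() rounded the allocation up to, e.g. when sizing a buffer:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (buf)
 *		len = ksize(buf);
 *
 * after which the full rounded-up size may be used. "buf" and "len" are
 * hypothetical caller variables, not names used elsewhere in this file.
 */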