slab.c revision 04231b3002ac53f8a64a7bd142fde3fa4b6808c6
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com> 81 * Alok N Kataria <alokk@calsoftinc.com> 82 * Christoph Lameter <christoph@lameter.com> 83 * 84 * Modified the slab allocator to be node aware on NUMA systems. 85 * Each node has its own list of partial, free and full slabs. 86 * All object allocations for a node occur from node specific slab lists. 87 */ 88 89#include <linux/slab.h> 90#include <linux/mm.h> 91#include <linux/poison.h> 92#include <linux/swap.h> 93#include <linux/cache.h> 94#include <linux/interrupt.h> 95#include <linux/init.h> 96#include <linux/compiler.h> 97#include <linux/cpuset.h> 98#include <linux/seq_file.h> 99#include <linux/notifier.h> 100#include <linux/kallsyms.h> 101#include <linux/cpu.h> 102#include <linux/sysctl.h> 103#include <linux/module.h> 104#include <linux/rcupdate.h> 105#include <linux/string.h> 106#include <linux/uaccess.h> 107#include <linux/nodemask.h> 108#include <linux/mempolicy.h> 109#include <linux/mutex.h> 110#include <linux/fault-inject.h> 111#include <linux/rtmutex.h> 112#include <linux/reciprocal_div.h> 113 114#include <asm/cacheflush.h> 115#include <asm/tlbflush.h> 116#include <asm/page.h> 117 118/* 119 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. 120 * 0 for faster, smaller code (especially in the critical paths). 121 * 122 * STATS - 1 to collect stats for /proc/slabinfo. 123 * 0 for faster, smaller code (especially in the critical paths). 124 * 125 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) 126 */ 127 128#ifdef CONFIG_DEBUG_SLAB 129#define DEBUG 1 130#define STATS 1 131#define FORCED_DEBUG 1 132#else 133#define DEBUG 0 134#define STATS 0 135#define FORCED_DEBUG 0 136#endif 137 138/* Shouldn't this be in a header file somewhere? */ 139#define BYTES_PER_WORD sizeof(void *) 140#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long)) 141 142#ifndef cache_line_size 143#define cache_line_size() L1_CACHE_BYTES 144#endif 145 146#ifndef ARCH_KMALLOC_MINALIGN 147/* 148 * Enforce a minimum alignment for the kmalloc caches. 149 * Usually, the kmalloc caches are cache_line_size() aligned, except when 150 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. 151 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 152 * alignment larger than the alignment of a 64-bit integer. 153 * ARCH_KMALLOC_MINALIGN allows that. 154 * Note that increasing this value may disable some debug features. 155 */ 156#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 157#endif 158 159#ifndef ARCH_SLAB_MINALIGN 160/* 161 * Enforce a minimum alignment for all caches. 162 * Intended for archs that get misalignment faults even for BYTES_PER_WORD 163 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN. 164 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables 165 * some debug features. 166 */ 167#define ARCH_SLAB_MINALIGN 0 168#endif 169 170#ifndef ARCH_KMALLOC_FLAGS 171#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN 172#endif 173 174/* Legal flag mask for kmem_cache_create(). 
 */
#if DEBUG
# define CREATE_MASK	(SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctl's are used for linking objs within a slab, via linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
275 */ 276}; 277 278/* 279 * bootstrap: The caches do not work without cpuarrays anymore, but the 280 * cpuarrays are allocated from the generic caches... 281 */ 282#define BOOT_CPUCACHE_ENTRIES 1 283struct arraycache_init { 284 struct array_cache cache; 285 void *entries[BOOT_CPUCACHE_ENTRIES]; 286}; 287 288/* 289 * The slab lists for all objects. 290 */ 291struct kmem_list3 { 292 struct list_head slabs_partial; /* partial list first, better asm code */ 293 struct list_head slabs_full; 294 struct list_head slabs_free; 295 unsigned long free_objects; 296 unsigned int free_limit; 297 unsigned int colour_next; /* Per-node cache coloring */ 298 spinlock_t list_lock; 299 struct array_cache *shared; /* shared per node */ 300 struct array_cache **alien; /* on other nodes */ 301 unsigned long next_reap; /* updated without locking */ 302 int free_touched; /* updated without locking */ 303}; 304 305/* 306 * Need this for bootstrapping a per node allocator. 307 */ 308#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1) 309struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; 310#define CACHE_CACHE 0 311#define SIZE_AC 1 312#define SIZE_L3 (1 + MAX_NUMNODES) 313 314static int drain_freelist(struct kmem_cache *cache, 315 struct kmem_list3 *l3, int tofree); 316static void free_block(struct kmem_cache *cachep, void **objpp, int len, 317 int node); 318static int enable_cpucache(struct kmem_cache *cachep); 319static void cache_reap(struct work_struct *unused); 320 321/* 322 * This function must be completely optimized away if a constant is passed to 323 * it. Mostly the same as what is in linux/slab.h except it returns an index. 324 */ 325static __always_inline int index_of(const size_t size) 326{ 327 extern void __bad_size(void); 328 329 if (__builtin_constant_p(size)) { 330 int i = 0; 331 332#define CACHE(x) \ 333 if (size <=x) \ 334 return i; \ 335 else \ 336 i++; 337#include "linux/kmalloc_sizes.h" 338#undef CACHE 339 __bad_size(); 340 } else 341 __bad_size(); 342 return 0; 343} 344 345static int slab_early_init = 1; 346 347#define INDEX_AC index_of(sizeof(struct arraycache_init)) 348#define INDEX_L3 index_of(sizeof(struct kmem_list3)) 349 350static void kmem_list3_init(struct kmem_list3 *parent) 351{ 352 INIT_LIST_HEAD(&parent->slabs_full); 353 INIT_LIST_HEAD(&parent->slabs_partial); 354 INIT_LIST_HEAD(&parent->slabs_free); 355 parent->shared = NULL; 356 parent->alien = NULL; 357 parent->colour_next = 0; 358 spin_lock_init(&parent->list_lock); 359 parent->free_objects = 0; 360 parent->free_touched = 0; 361} 362 363#define MAKE_LIST(cachep, listp, slab, nodeid) \ 364 do { \ 365 INIT_LIST_HEAD(listp); \ 366 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ 367 } while (0) 368 369#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ 370 do { \ 371 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ 372 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ 373 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 374 } while (0) 375 376/* 377 * struct kmem_cache 378 * 379 * manages a cache. 380 */ 381 382struct kmem_cache { 383/* 1) per-cpu data, touched during every alloc/free */ 384 struct array_cache *array[NR_CPUS]; 385/* 2) Cache tunables. 
   Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
467 */ 468#define REAPTIMEOUT_CPUC (2*HZ) 469#define REAPTIMEOUT_LIST3 (4*HZ) 470 471#if STATS 472#define STATS_INC_ACTIVE(x) ((x)->num_active++) 473#define STATS_DEC_ACTIVE(x) ((x)->num_active--) 474#define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 475#define STATS_INC_GROWN(x) ((x)->grown++) 476#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) 477#define STATS_SET_HIGH(x) \ 478 do { \ 479 if ((x)->num_active > (x)->high_mark) \ 480 (x)->high_mark = (x)->num_active; \ 481 } while (0) 482#define STATS_INC_ERR(x) ((x)->errors++) 483#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) 484#define STATS_INC_NODEFREES(x) ((x)->node_frees++) 485#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) 486#define STATS_SET_FREEABLE(x, i) \ 487 do { \ 488 if ((x)->max_freeable < i) \ 489 (x)->max_freeable = i; \ 490 } while (0) 491#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) 492#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) 493#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) 494#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) 495#else 496#define STATS_INC_ACTIVE(x) do { } while (0) 497#define STATS_DEC_ACTIVE(x) do { } while (0) 498#define STATS_INC_ALLOCED(x) do { } while (0) 499#define STATS_INC_GROWN(x) do { } while (0) 500#define STATS_ADD_REAPED(x,y) do { } while (0) 501#define STATS_SET_HIGH(x) do { } while (0) 502#define STATS_INC_ERR(x) do { } while (0) 503#define STATS_INC_NODEALLOCS(x) do { } while (0) 504#define STATS_INC_NODEFREES(x) do { } while (0) 505#define STATS_INC_ACOVERFLOW(x) do { } while (0) 506#define STATS_SET_FREEABLE(x, i) do { } while (0) 507#define STATS_INC_ALLOCHIT(x) do { } while (0) 508#define STATS_INC_ALLOCMISS(x) do { } while (0) 509#define STATS_INC_FREEHIT(x) do { } while (0) 510#define STATS_INC_FREEMISS(x) do { } while (0) 511#endif 512 513#if DEBUG 514 515/* 516 * memory layout of objects: 517 * 0 : objp 518 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that 519 * the end of an object is aligned with the end of the real 520 * allocation. Catches writes behind the end of the allocation. 521 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: 522 * redzone word. 523 * cachep->obj_offset: The real object. 
524 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] 525 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address 526 * [BYTES_PER_WORD long] 527 */ 528static int obj_offset(struct kmem_cache *cachep) 529{ 530 return cachep->obj_offset; 531} 532 533static int obj_size(struct kmem_cache *cachep) 534{ 535 return cachep->obj_size; 536} 537 538static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) 539{ 540 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 541 return (unsigned long long*) (objp + obj_offset(cachep) - 542 sizeof(unsigned long long)); 543} 544 545static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) 546{ 547 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); 548 if (cachep->flags & SLAB_STORE_USER) 549 return (unsigned long long *)(objp + cachep->buffer_size - 550 sizeof(unsigned long long) - 551 REDZONE_ALIGN); 552 return (unsigned long long *) (objp + cachep->buffer_size - 553 sizeof(unsigned long long)); 554} 555 556static void **dbg_userword(struct kmem_cache *cachep, void *objp) 557{ 558 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); 559 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); 560} 561 562#else 563 564#define obj_offset(x) 0 565#define obj_size(cachep) (cachep->buffer_size) 566#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 567#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 568#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 569 570#endif 571 572/* 573 * Do not go above this order unless 0 objects fit into the slab. 574 */ 575#define BREAK_GFP_ORDER_HI 1 576#define BREAK_GFP_ORDER_LO 0 577static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; 578 579/* 580 * Functions for storing/retrieving the cachep and or slab from the page 581 * allocator. These are used to find the slab an obj belongs to. With kfree(), 582 * these are used to find the cache which an obj belongs to. 
583 */ 584static inline void page_set_cache(struct page *page, struct kmem_cache *cache) 585{ 586 page->lru.next = (struct list_head *)cache; 587} 588 589static inline struct kmem_cache *page_get_cache(struct page *page) 590{ 591 page = compound_head(page); 592 BUG_ON(!PageSlab(page)); 593 return (struct kmem_cache *)page->lru.next; 594} 595 596static inline void page_set_slab(struct page *page, struct slab *slab) 597{ 598 page->lru.prev = (struct list_head *)slab; 599} 600 601static inline struct slab *page_get_slab(struct page *page) 602{ 603 BUG_ON(!PageSlab(page)); 604 return (struct slab *)page->lru.prev; 605} 606 607static inline struct kmem_cache *virt_to_cache(const void *obj) 608{ 609 struct page *page = virt_to_head_page(obj); 610 return page_get_cache(page); 611} 612 613static inline struct slab *virt_to_slab(const void *obj) 614{ 615 struct page *page = virt_to_head_page(obj); 616 return page_get_slab(page); 617} 618 619static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, 620 unsigned int idx) 621{ 622 return slab->s_mem + cache->buffer_size * idx; 623} 624 625/* 626 * We want to avoid an expensive divide : (offset / cache->buffer_size) 627 * Using the fact that buffer_size is a constant for a particular cache, 628 * we can replace (offset / cache->buffer_size) by 629 * reciprocal_divide(offset, cache->reciprocal_buffer_size) 630 */ 631static inline unsigned int obj_to_index(const struct kmem_cache *cache, 632 const struct slab *slab, void *obj) 633{ 634 u32 offset = (obj - slab->s_mem); 635 return reciprocal_divide(offset, cache->reciprocal_buffer_size); 636} 637 638/* 639 * These are the default caches for kmalloc. Custom caches can have other sizes. 640 */ 641struct cache_sizes malloc_sizes[] = { 642#define CACHE(x) { .cs_size = (x) }, 643#include <linux/kmalloc_sizes.h> 644 CACHE(ULONG_MAX) 645#undef CACHE 646}; 647EXPORT_SYMBOL(malloc_sizes); 648 649/* Must match cache_sizes above. Out of line to keep cache footprint low. */ 650struct cache_names { 651 char *name; 652 char *name_dma; 653}; 654 655static struct cache_names __initdata cache_names[] = { 656#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, 657#include <linux/kmalloc_sizes.h> 658 {NULL,} 659#undef CACHE 660}; 661 662static struct arraycache_init initarray_cache __initdata = 663 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 664static struct arraycache_init initarray_generic = 665 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 666 667/* internal cache of cache description objs */ 668static struct kmem_cache cache_cache = { 669 .batchcount = 1, 670 .limit = BOOT_CPUCACHE_ENTRIES, 671 .shared = 1, 672 .buffer_size = sizeof(struct kmem_cache), 673 .name = "kmem_cache", 674}; 675 676#define BAD_ALIEN_MAGIC 0x01020304ul 677 678#ifdef CONFIG_LOCKDEP 679 680/* 681 * Slab sometimes uses the kmalloc slabs to store the slab headers 682 * for other slabs "off slab". 683 * The locking for this is tricky in that it nests within the locks 684 * of all other slabs in a few places; to deal with this special 685 * locking we put on-slab caches into a separate lock-class. 686 * 687 * We set lock class for alien array caches which are up during init. 
688 * The lock annotation will be lost if all cpus of a node goes down and 689 * then comes back up during hotplug 690 */ 691static struct lock_class_key on_slab_l3_key; 692static struct lock_class_key on_slab_alc_key; 693 694static inline void init_lock_keys(void) 695 696{ 697 int q; 698 struct cache_sizes *s = malloc_sizes; 699 700 while (s->cs_size != ULONG_MAX) { 701 for_each_node(q) { 702 struct array_cache **alc; 703 int r; 704 struct kmem_list3 *l3 = s->cs_cachep->nodelists[q]; 705 if (!l3 || OFF_SLAB(s->cs_cachep)) 706 continue; 707 lockdep_set_class(&l3->list_lock, &on_slab_l3_key); 708 alc = l3->alien; 709 /* 710 * FIXME: This check for BAD_ALIEN_MAGIC 711 * should go away when common slab code is taught to 712 * work even without alien caches. 713 * Currently, non NUMA code returns BAD_ALIEN_MAGIC 714 * for alloc_alien_cache, 715 */ 716 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) 717 continue; 718 for_each_node(r) { 719 if (alc[r]) 720 lockdep_set_class(&alc[r]->lock, 721 &on_slab_alc_key); 722 } 723 } 724 s++; 725 } 726} 727#else 728static inline void init_lock_keys(void) 729{ 730} 731#endif 732 733/* 734 * 1. Guard access to the cache-chain. 735 * 2. Protect sanity of cpu_online_map against cpu hotplug events 736 */ 737static DEFINE_MUTEX(cache_chain_mutex); 738static struct list_head cache_chain; 739 740/* 741 * chicken and egg problem: delay the per-cpu array allocation 742 * until the general caches are up. 743 */ 744static enum { 745 NONE, 746 PARTIAL_AC, 747 PARTIAL_L3, 748 FULL 749} g_cpucache_up; 750 751/* 752 * used by boot code to determine if it can use slab based allocator 753 */ 754int slab_is_available(void) 755{ 756 return g_cpucache_up == FULL; 757} 758 759static DEFINE_PER_CPU(struct delayed_work, reap_work); 760 761static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 762{ 763 return cachep->array[smp_processor_id()]; 764} 765 766static inline struct kmem_cache *__find_general_cachep(size_t size, 767 gfp_t gfpflags) 768{ 769 struct cache_sizes *csizep = malloc_sizes; 770 771#if DEBUG 772 /* This happens if someone tries to call 773 * kmem_cache_create(), or __kmalloc(), before 774 * the generic caches are initialized. 775 */ 776 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 777#endif 778 if (!size) 779 return ZERO_SIZE_PTR; 780 781 while (size > csizep->cs_size) 782 csizep++; 783 784 /* 785 * Really subtle: The last entry with cs->cs_size==ULONG_MAX 786 * has cs_{dma,}cachep==NULL. Thus no special case 787 * for large kmalloc calls required. 788 */ 789#ifdef CONFIG_ZONE_DMA 790 if (unlikely(gfpflags & GFP_DMA)) 791 return csizep->cs_dmacachep; 792#endif 793 return csizep->cs_cachep; 794} 795 796static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 797{ 798 return __find_general_cachep(size, gfpflags); 799} 800 801static size_t slab_mgmt_size(size_t nr_objs, size_t align) 802{ 803 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); 804} 805 806/* 807 * Calculate the number of objects and left-over bytes for a given buffer size. 808 */ 809static void cache_estimate(unsigned long gfporder, size_t buffer_size, 810 size_t align, int flags, size_t *left_over, 811 unsigned int *num) 812{ 813 int nr_objs; 814 size_t mgmt_size; 815 size_t slab_size = PAGE_SIZE << gfporder; 816 817 /* 818 * The slab management structure can be either off the slab or 819 * on it. 
For the latter case, the memory allocated for a 820 * slab is used for: 821 * 822 * - The struct slab 823 * - One kmem_bufctl_t for each object 824 * - Padding to respect alignment of @align 825 * - @buffer_size bytes for each object 826 * 827 * If the slab management structure is off the slab, then the 828 * alignment will already be calculated into the size. Because 829 * the slabs are all pages aligned, the objects will be at the 830 * correct alignment when allocated. 831 */ 832 if (flags & CFLGS_OFF_SLAB) { 833 mgmt_size = 0; 834 nr_objs = slab_size / buffer_size; 835 836 if (nr_objs > SLAB_LIMIT) 837 nr_objs = SLAB_LIMIT; 838 } else { 839 /* 840 * Ignore padding for the initial guess. The padding 841 * is at most @align-1 bytes, and @buffer_size is at 842 * least @align. In the worst case, this result will 843 * be one greater than the number of objects that fit 844 * into the memory allocation when taking the padding 845 * into account. 846 */ 847 nr_objs = (slab_size - sizeof(struct slab)) / 848 (buffer_size + sizeof(kmem_bufctl_t)); 849 850 /* 851 * This calculated number will be either the right 852 * amount, or one greater than what we want. 853 */ 854 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size 855 > slab_size) 856 nr_objs--; 857 858 if (nr_objs > SLAB_LIMIT) 859 nr_objs = SLAB_LIMIT; 860 861 mgmt_size = slab_mgmt_size(nr_objs, align); 862 } 863 *num = nr_objs; 864 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 865} 866 867#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) 868 869static void __slab_error(const char *function, struct kmem_cache *cachep, 870 char *msg) 871{ 872 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", 873 function, cachep->name, msg); 874 dump_stack(); 875} 876 877/* 878 * By default on NUMA we use alien caches to stage the freeing of 879 * objects allocated from other nodes. This causes massive memory 880 * inefficiencies when using fake NUMA setup to split memory into a 881 * large number of small nodes, so it can be disabled on the command 882 * line 883 */ 884 885static int use_alien_caches __read_mostly = 1; 886static int numa_platform __read_mostly = 1; 887static int __init noaliencache_setup(char *s) 888{ 889 use_alien_caches = 0; 890 return 1; 891} 892__setup("noaliencache", noaliencache_setup); 893 894#ifdef CONFIG_NUMA 895/* 896 * Special reaping functions for NUMA systems called from cache_reap(). 897 * These take care of doing round robin flushing of alien caches (containing 898 * objects freed on different nodes from which they were allocated) and the 899 * flushing of remote pcps by calling drain_node_pages. 900 */ 901static DEFINE_PER_CPU(unsigned long, reap_node); 902 903static void init_reap_node(int cpu) 904{ 905 int node; 906 907 node = next_node(cpu_to_node(cpu), node_online_map); 908 if (node == MAX_NUMNODES) 909 node = first_node(node_online_map); 910 911 per_cpu(reap_node, cpu) = node; 912} 913 914static void next_reap_node(void) 915{ 916 int node = __get_cpu_var(reap_node); 917 918 node = next_node(node, node_online_map); 919 if (unlikely(node >= MAX_NUMNODES)) 920 node = first_node(node_online_map); 921 __get_cpu_var(reap_node) = node; 922} 923 924#else 925#define init_reap_node(cpu) do { } while (0) 926#define next_reap_node(void) do { } while (0) 927#endif 928 929/* 930 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz 931 * via the workqueue/eventd. 
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache
*ac, int node) 1070{ 1071 struct kmem_list3 *rl3 = cachep->nodelists[node]; 1072 1073 if (ac->avail) { 1074 spin_lock(&rl3->list_lock); 1075 /* 1076 * Stuff objects into the remote nodes shared array first. 1077 * That way we could avoid the overhead of putting the objects 1078 * into the free lists and getting them back later. 1079 */ 1080 if (rl3->shared) 1081 transfer_objects(rl3->shared, ac, ac->limit); 1082 1083 free_block(cachep, ac->entry, ac->avail, node); 1084 ac->avail = 0; 1085 spin_unlock(&rl3->list_lock); 1086 } 1087} 1088 1089/* 1090 * Called from cache_reap() to regularly drain alien caches round robin. 1091 */ 1092static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3) 1093{ 1094 int node = __get_cpu_var(reap_node); 1095 1096 if (l3->alien) { 1097 struct array_cache *ac = l3->alien[node]; 1098 1099 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { 1100 __drain_alien_cache(cachep, ac, node); 1101 spin_unlock_irq(&ac->lock); 1102 } 1103 } 1104} 1105 1106static void drain_alien_cache(struct kmem_cache *cachep, 1107 struct array_cache **alien) 1108{ 1109 int i = 0; 1110 struct array_cache *ac; 1111 unsigned long flags; 1112 1113 for_each_online_node(i) { 1114 ac = alien[i]; 1115 if (ac) { 1116 spin_lock_irqsave(&ac->lock, flags); 1117 __drain_alien_cache(cachep, ac, i); 1118 spin_unlock_irqrestore(&ac->lock, flags); 1119 } 1120 } 1121} 1122 1123static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) 1124{ 1125 struct slab *slabp = virt_to_slab(objp); 1126 int nodeid = slabp->nodeid; 1127 struct kmem_list3 *l3; 1128 struct array_cache *alien = NULL; 1129 int node; 1130 1131 node = numa_node_id(); 1132 1133 /* 1134 * Make sure we are not freeing a object from another node to the array 1135 * cache on this cpu. 1136 */ 1137 if (likely(slabp->nodeid == node)) 1138 return 0; 1139 1140 l3 = cachep->nodelists[node]; 1141 STATS_INC_NODEFREES(cachep); 1142 if (l3->alien && l3->alien[nodeid]) { 1143 alien = l3->alien[nodeid]; 1144 spin_lock(&alien->lock); 1145 if (unlikely(alien->avail == alien->limit)) { 1146 STATS_INC_ACOVERFLOW(cachep); 1147 __drain_alien_cache(cachep, alien, nodeid); 1148 } 1149 alien->entry[alien->avail++] = objp; 1150 spin_unlock(&alien->lock); 1151 } else { 1152 spin_lock(&(cachep->nodelists[nodeid])->list_lock); 1153 free_block(cachep, &objp, 1, nodeid); 1154 spin_unlock(&(cachep->nodelists[nodeid])->list_lock); 1155 } 1156 return 1; 1157} 1158#endif 1159 1160static int __cpuinit cpuup_callback(struct notifier_block *nfb, 1161 unsigned long action, void *hcpu) 1162{ 1163 long cpu = (long)hcpu; 1164 struct kmem_cache *cachep; 1165 struct kmem_list3 *l3 = NULL; 1166 int node = cpu_to_node(cpu); 1167 const int memsize = sizeof(struct kmem_list3); 1168 1169 switch (action) { 1170 case CPU_LOCK_ACQUIRE: 1171 mutex_lock(&cache_chain_mutex); 1172 break; 1173 case CPU_UP_PREPARE: 1174 case CPU_UP_PREPARE_FROZEN: 1175 /* 1176 * We need to do this right in the beginning since 1177 * alloc_arraycache's are going to use this list. 1178 * kmalloc_node allows us to add the slab to the right 1179 * kmem_list3 and not this cpu's kmem_list3 1180 */ 1181 1182 list_for_each_entry(cachep, &cache_chain, next) { 1183 /* 1184 * Set up the size64 kmemlist for cpu before we can 1185 * begin anything. 
Make sure some other cpu on this 1186 * node has not already allocated this 1187 */ 1188 if (!cachep->nodelists[node]) { 1189 l3 = kmalloc_node(memsize, GFP_KERNEL, node); 1190 if (!l3) 1191 goto bad; 1192 kmem_list3_init(l3); 1193 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 1194 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1195 1196 /* 1197 * The l3s don't come and go as CPUs come and 1198 * go. cache_chain_mutex is sufficient 1199 * protection here. 1200 */ 1201 cachep->nodelists[node] = l3; 1202 } 1203 1204 spin_lock_irq(&cachep->nodelists[node]->list_lock); 1205 cachep->nodelists[node]->free_limit = 1206 (1 + nr_cpus_node(node)) * 1207 cachep->batchcount + cachep->num; 1208 spin_unlock_irq(&cachep->nodelists[node]->list_lock); 1209 } 1210 1211 /* 1212 * Now we can go ahead with allocating the shared arrays and 1213 * array caches 1214 */ 1215 list_for_each_entry(cachep, &cache_chain, next) { 1216 struct array_cache *nc; 1217 struct array_cache *shared = NULL; 1218 struct array_cache **alien = NULL; 1219 1220 nc = alloc_arraycache(node, cachep->limit, 1221 cachep->batchcount); 1222 if (!nc) 1223 goto bad; 1224 if (cachep->shared) { 1225 shared = alloc_arraycache(node, 1226 cachep->shared * cachep->batchcount, 1227 0xbaadf00d); 1228 if (!shared) 1229 goto bad; 1230 } 1231 if (use_alien_caches) { 1232 alien = alloc_alien_cache(node, cachep->limit); 1233 if (!alien) 1234 goto bad; 1235 } 1236 cachep->array[cpu] = nc; 1237 l3 = cachep->nodelists[node]; 1238 BUG_ON(!l3); 1239 1240 spin_lock_irq(&l3->list_lock); 1241 if (!l3->shared) { 1242 /* 1243 * We are serialised from CPU_DEAD or 1244 * CPU_UP_CANCELLED by the cpucontrol lock 1245 */ 1246 l3->shared = shared; 1247 shared = NULL; 1248 } 1249#ifdef CONFIG_NUMA 1250 if (!l3->alien) { 1251 l3->alien = alien; 1252 alien = NULL; 1253 } 1254#endif 1255 spin_unlock_irq(&l3->list_lock); 1256 kfree(shared); 1257 free_alien_cache(alien); 1258 } 1259 break; 1260 case CPU_ONLINE: 1261 case CPU_ONLINE_FROZEN: 1262 start_cpu_timer(cpu); 1263 break; 1264#ifdef CONFIG_HOTPLUG_CPU 1265 case CPU_DOWN_PREPARE: 1266 case CPU_DOWN_PREPARE_FROZEN: 1267 /* 1268 * Shutdown cache reaper. Note that the cache_chain_mutex is 1269 * held so that if cache_reap() is invoked it cannot do 1270 * anything expensive but will only modify reap_work 1271 * and reschedule the timer. 1272 */ 1273 cancel_rearming_delayed_work(&per_cpu(reap_work, cpu)); 1274 /* Now the cache_reaper is guaranteed to be not running. */ 1275 per_cpu(reap_work, cpu).work.func = NULL; 1276 break; 1277 case CPU_DOWN_FAILED: 1278 case CPU_DOWN_FAILED_FROZEN: 1279 start_cpu_timer(cpu); 1280 break; 1281 case CPU_DEAD: 1282 case CPU_DEAD_FROZEN: 1283 /* 1284 * Even if all the cpus of a node are down, we don't free the 1285 * kmem_list3 of any cache. This to avoid a race between 1286 * cpu_down, and a kmalloc allocation from another cpu for 1287 * memory from the node of the cpu going down. The list3 1288 * structure is usually allocated from kmem_cache_create() and 1289 * gets destroyed at kmem_cache_destroy(). 1290 */ 1291 /* fall thru */ 1292#endif 1293 case CPU_UP_CANCELED: 1294 case CPU_UP_CANCELED_FROZEN: 1295 list_for_each_entry(cachep, &cache_chain, next) { 1296 struct array_cache *nc; 1297 struct array_cache *shared; 1298 struct array_cache **alien; 1299 cpumask_t mask; 1300 1301 mask = node_to_cpumask(node); 1302 /* cpu is dead; no one can alloc from it. 
*/ 1303 nc = cachep->array[cpu]; 1304 cachep->array[cpu] = NULL; 1305 l3 = cachep->nodelists[node]; 1306 1307 if (!l3) 1308 goto free_array_cache; 1309 1310 spin_lock_irq(&l3->list_lock); 1311 1312 /* Free limit for this kmem_list3 */ 1313 l3->free_limit -= cachep->batchcount; 1314 if (nc) 1315 free_block(cachep, nc->entry, nc->avail, node); 1316 1317 if (!cpus_empty(mask)) { 1318 spin_unlock_irq(&l3->list_lock); 1319 goto free_array_cache; 1320 } 1321 1322 shared = l3->shared; 1323 if (shared) { 1324 free_block(cachep, shared->entry, 1325 shared->avail, node); 1326 l3->shared = NULL; 1327 } 1328 1329 alien = l3->alien; 1330 l3->alien = NULL; 1331 1332 spin_unlock_irq(&l3->list_lock); 1333 1334 kfree(shared); 1335 if (alien) { 1336 drain_alien_cache(cachep, alien); 1337 free_alien_cache(alien); 1338 } 1339free_array_cache: 1340 kfree(nc); 1341 } 1342 /* 1343 * In the previous loop, all the objects were freed to 1344 * the respective cache's slabs, now we can go ahead and 1345 * shrink each nodelist to its limit. 1346 */ 1347 list_for_each_entry(cachep, &cache_chain, next) { 1348 l3 = cachep->nodelists[node]; 1349 if (!l3) 1350 continue; 1351 drain_freelist(cachep, l3, l3->free_objects); 1352 } 1353 break; 1354 case CPU_LOCK_RELEASE: 1355 mutex_unlock(&cache_chain_mutex); 1356 break; 1357 } 1358 return NOTIFY_OK; 1359bad: 1360 return NOTIFY_BAD; 1361} 1362 1363static struct notifier_block __cpuinitdata cpucache_notifier = { 1364 &cpuup_callback, NULL, 0 1365}; 1366 1367/* 1368 * swap the static kmem_list3 with kmalloced memory 1369 */ 1370static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, 1371 int nodeid) 1372{ 1373 struct kmem_list3 *ptr; 1374 1375 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); 1376 BUG_ON(!ptr); 1377 1378 local_irq_disable(); 1379 memcpy(ptr, list, sizeof(struct kmem_list3)); 1380 /* 1381 * Do not assume that spinlocks can be initialized via memcpy: 1382 */ 1383 spin_lock_init(&ptr->list_lock); 1384 1385 MAKE_ALL_LISTS(cachep, ptr, nodeid); 1386 cachep->nodelists[nodeid] = ptr; 1387 local_irq_enable(); 1388} 1389 1390/* 1391 * Initialisation. Called after the page allocator have been initialised and 1392 * before smp_init(). 1393 */ 1394void __init kmem_cache_init(void) 1395{ 1396 size_t left_over; 1397 struct cache_sizes *sizes; 1398 struct cache_names *names; 1399 int i; 1400 int order; 1401 int node; 1402 1403 if (num_possible_nodes() == 1) { 1404 use_alien_caches = 0; 1405 numa_platform = 0; 1406 } 1407 1408 for (i = 0; i < NUM_INIT_LISTS; i++) { 1409 kmem_list3_init(&initkmem_list3[i]); 1410 if (i < MAX_NUMNODES) 1411 cache_cache.nodelists[i] = NULL; 1412 } 1413 1414 /* 1415 * Fragmentation resistance on low memory - only use bigger 1416 * page orders on machines with more than 32MB of memory. 1417 */ 1418 if (num_physpages > (32 << 20) >> PAGE_SHIFT) 1419 slab_break_gfp_order = BREAK_GFP_ORDER_HI; 1420 1421 /* Bootstrap is tricky, because several objects are allocated 1422 * from caches that do not exist yet: 1423 * 1) initialize the cache_cache cache: it contains the struct 1424 * kmem_cache structures of all caches, except cache_cache itself: 1425 * cache_cache is statically allocated. 1426 * Initially an __init data area is used for the head array and the 1427 * kmem_list3 structures, it's replaced with a kmalloc allocated 1428 * array at the end of the bootstrap. 1429 * 2) Create the first kmalloc cache. 1430 * The struct kmem_cache for the new cache is allocated normally. 
1431 * An __init data area is used for the head array. 1432 * 3) Create the remaining kmalloc caches, with minimally sized 1433 * head arrays. 1434 * 4) Replace the __init data head arrays for cache_cache and the first 1435 * kmalloc cache with kmalloc allocated arrays. 1436 * 5) Replace the __init data for kmem_list3 for cache_cache and 1437 * the other cache's with kmalloc allocated memory. 1438 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1439 */ 1440 1441 node = numa_node_id(); 1442 1443 /* 1) create the cache_cache */ 1444 INIT_LIST_HEAD(&cache_chain); 1445 list_add(&cache_cache.next, &cache_chain); 1446 cache_cache.colour_off = cache_line_size(); 1447 cache_cache.array[smp_processor_id()] = &initarray_cache.cache; 1448 cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE]; 1449 1450 /* 1451 * struct kmem_cache size depends on nr_node_ids, which 1452 * can be less than MAX_NUMNODES. 1453 */ 1454 cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) + 1455 nr_node_ids * sizeof(struct kmem_list3 *); 1456#if DEBUG 1457 cache_cache.obj_size = cache_cache.buffer_size; 1458#endif 1459 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, 1460 cache_line_size()); 1461 cache_cache.reciprocal_buffer_size = 1462 reciprocal_value(cache_cache.buffer_size); 1463 1464 for (order = 0; order < MAX_ORDER; order++) { 1465 cache_estimate(order, cache_cache.buffer_size, 1466 cache_line_size(), 0, &left_over, &cache_cache.num); 1467 if (cache_cache.num) 1468 break; 1469 } 1470 BUG_ON(!cache_cache.num); 1471 cache_cache.gfporder = order; 1472 cache_cache.colour = left_over / cache_cache.colour_off; 1473 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1474 sizeof(struct slab), cache_line_size()); 1475 1476 /* 2+3) create the kmalloc caches */ 1477 sizes = malloc_sizes; 1478 names = cache_names; 1479 1480 /* 1481 * Initialize the caches that provide memory for the array cache and the 1482 * kmem_list3 structures first. Without this, further allocations will 1483 * bug. 1484 */ 1485 1486 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, 1487 sizes[INDEX_AC].cs_size, 1488 ARCH_KMALLOC_MINALIGN, 1489 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1490 NULL); 1491 1492 if (INDEX_AC != INDEX_L3) { 1493 sizes[INDEX_L3].cs_cachep = 1494 kmem_cache_create(names[INDEX_L3].name, 1495 sizes[INDEX_L3].cs_size, 1496 ARCH_KMALLOC_MINALIGN, 1497 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1498 NULL); 1499 } 1500 1501 slab_early_init = 0; 1502 1503 while (sizes->cs_size != ULONG_MAX) { 1504 /* 1505 * For performance, all the general caches are L1 aligned. 1506 * This should be particularly beneficial on SMP boxes, as it 1507 * eliminates "false sharing". 1508 * Note for systems short on memory removing the alignment will 1509 * allow tighter packing of the smaller caches. 
1510 */ 1511 if (!sizes->cs_cachep) { 1512 sizes->cs_cachep = kmem_cache_create(names->name, 1513 sizes->cs_size, 1514 ARCH_KMALLOC_MINALIGN, 1515 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1516 NULL); 1517 } 1518#ifdef CONFIG_ZONE_DMA 1519 sizes->cs_dmacachep = kmem_cache_create( 1520 names->name_dma, 1521 sizes->cs_size, 1522 ARCH_KMALLOC_MINALIGN, 1523 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1524 SLAB_PANIC, 1525 NULL); 1526#endif 1527 sizes++; 1528 names++; 1529 } 1530 /* 4) Replace the bootstrap head arrays */ 1531 { 1532 struct array_cache *ptr; 1533 1534 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1535 1536 local_irq_disable(); 1537 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache); 1538 memcpy(ptr, cpu_cache_get(&cache_cache), 1539 sizeof(struct arraycache_init)); 1540 /* 1541 * Do not assume that spinlocks can be initialized via memcpy: 1542 */ 1543 spin_lock_init(&ptr->lock); 1544 1545 cache_cache.array[smp_processor_id()] = ptr; 1546 local_irq_enable(); 1547 1548 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 1549 1550 local_irq_disable(); 1551 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) 1552 != &initarray_generic.cache); 1553 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep), 1554 sizeof(struct arraycache_init)); 1555 /* 1556 * Do not assume that spinlocks can be initialized via memcpy: 1557 */ 1558 spin_lock_init(&ptr->lock); 1559 1560 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = 1561 ptr; 1562 local_irq_enable(); 1563 } 1564 /* 5) Replace the bootstrap kmem_list3's */ 1565 { 1566 int nid; 1567 1568 /* Replace the static kmem_list3 structures for the boot cpu */ 1569 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node); 1570 1571 for_each_node_state(nid, N_NORMAL_MEMORY) { 1572 init_list(malloc_sizes[INDEX_AC].cs_cachep, 1573 &initkmem_list3[SIZE_AC + nid], nid); 1574 1575 if (INDEX_AC != INDEX_L3) { 1576 init_list(malloc_sizes[INDEX_L3].cs_cachep, 1577 &initkmem_list3[SIZE_L3 + nid], nid); 1578 } 1579 } 1580 } 1581 1582 /* 6) resize the head arrays to their final sizes */ 1583 { 1584 struct kmem_cache *cachep; 1585 mutex_lock(&cache_chain_mutex); 1586 list_for_each_entry(cachep, &cache_chain, next) 1587 if (enable_cpucache(cachep)) 1588 BUG(); 1589 mutex_unlock(&cache_chain_mutex); 1590 } 1591 1592 /* Annotate slab for lockdep -- annotate the malloc caches */ 1593 init_lock_keys(); 1594 1595 1596 /* Done! */ 1597 g_cpucache_up = FULL; 1598 1599 /* 1600 * Register a cpu startup notifier callback that initializes 1601 * cpu_cache_get for all new cpus 1602 */ 1603 register_cpu_notifier(&cpucache_notifier); 1604 1605 /* 1606 * The reap timers are started later, with a module init call: That part 1607 * of the kernel is not yet operational. 1608 */ 1609} 1610 1611static int __init cpucache_init(void) 1612{ 1613 int cpu; 1614 1615 /* 1616 * Register the timers that return unneeded pages to the page allocator 1617 */ 1618 for_each_online_cpu(cpu) 1619 start_cpu_timer(cpu); 1620 return 0; 1621} 1622__initcall(cpucache_init); 1623 1624/* 1625 * Interface to system's page allocator. No need to hold the cache-lock. 1626 * 1627 * If we requested dmaable memory, we will get it. Even if we 1628 * did not request dmaable memory, we might get it, but that 1629 * would be relatively rare and ignorable. 
1630 */ 1631static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) 1632{ 1633 struct page *page; 1634 int nr_pages; 1635 int i; 1636 1637#ifndef CONFIG_MMU 1638 /* 1639 * Nommu uses slab's for process anonymous memory allocations, and thus 1640 * requires __GFP_COMP to properly refcount higher order allocations 1641 */ 1642 flags |= __GFP_COMP; 1643#endif 1644 1645 flags |= cachep->gfpflags; 1646 1647 page = alloc_pages_node(nodeid, flags, cachep->gfporder); 1648 if (!page) 1649 return NULL; 1650 1651 nr_pages = (1 << cachep->gfporder); 1652 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1653 add_zone_page_state(page_zone(page), 1654 NR_SLAB_RECLAIMABLE, nr_pages); 1655 else 1656 add_zone_page_state(page_zone(page), 1657 NR_SLAB_UNRECLAIMABLE, nr_pages); 1658 for (i = 0; i < nr_pages; i++) 1659 __SetPageSlab(page + i); 1660 return page_address(page); 1661} 1662 1663/* 1664 * Interface to system's page release. 1665 */ 1666static void kmem_freepages(struct kmem_cache *cachep, void *addr) 1667{ 1668 unsigned long i = (1 << cachep->gfporder); 1669 struct page *page = virt_to_page(addr); 1670 const unsigned long nr_freed = i; 1671 1672 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1673 sub_zone_page_state(page_zone(page), 1674 NR_SLAB_RECLAIMABLE, nr_freed); 1675 else 1676 sub_zone_page_state(page_zone(page), 1677 NR_SLAB_UNRECLAIMABLE, nr_freed); 1678 while (i--) { 1679 BUG_ON(!PageSlab(page)); 1680 __ClearPageSlab(page); 1681 page++; 1682 } 1683 if (current->reclaim_state) 1684 current->reclaim_state->reclaimed_slab += nr_freed; 1685 free_pages((unsigned long)addr, cachep->gfporder); 1686} 1687 1688static void kmem_rcu_free(struct rcu_head *head) 1689{ 1690 struct slab_rcu *slab_rcu = (struct slab_rcu *)head; 1691 struct kmem_cache *cachep = slab_rcu->cachep; 1692 1693 kmem_freepages(cachep, slab_rcu->addr); 1694 if (OFF_SLAB(cachep)) 1695 kmem_cache_free(cachep->slabp_cache, slab_rcu); 1696} 1697 1698#if DEBUG 1699 1700#ifdef CONFIG_DEBUG_PAGEALLOC 1701static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, 1702 unsigned long caller) 1703{ 1704 int size = obj_size(cachep); 1705 1706 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; 1707 1708 if (size < 5 * sizeof(unsigned long)) 1709 return; 1710 1711 *addr++ = 0x12345678; 1712 *addr++ = caller; 1713 *addr++ = smp_processor_id(); 1714 size -= 3 * sizeof(unsigned long); 1715 { 1716 unsigned long *sptr = &caller; 1717 unsigned long svalue; 1718 1719 while (!kstack_end(sptr)) { 1720 svalue = *sptr++; 1721 if (kernel_text_address(svalue)) { 1722 *addr++ = svalue; 1723 size -= sizeof(unsigned long); 1724 if (size <= sizeof(unsigned long)) 1725 break; 1726 } 1727 } 1728 1729 } 1730 *addr++ = 0x87654321; 1731} 1732#endif 1733 1734static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) 1735{ 1736 int size = obj_size(cachep); 1737 addr = &((char *)addr)[obj_offset(cachep)]; 1738 1739 memset(addr, val, size); 1740 *(unsigned char *)(addr + size - 1) = POISON_END; 1741} 1742 1743static void dump_line(char *data, int offset, int limit) 1744{ 1745 int i; 1746 unsigned char error = 0; 1747 int bad_count = 0; 1748 1749 printk(KERN_ERR "%03x:", offset); 1750 for (i = 0; i < limit; i++) { 1751 if (data[offset + i] != POISON_FREE) { 1752 error = data[offset + i]; 1753 bad_count++; 1754 } 1755 printk(" %02x", (unsigned char)data[offset + i]); 1756 } 1757 printk("\n"); 1758 1759 if (bad_count == 1) { 1760 error ^= POISON_FREE; 1761 if (!(error & (error - 1))) { 1762 printk(KERN_ERR "Single 
bit error detected. Probably " 1763 "bad RAM.\n"); 1764#ifdef CONFIG_X86 1765 printk(KERN_ERR "Run memtest86+ or a similar memory " 1766 "test tool.\n"); 1767#else 1768 printk(KERN_ERR "Run a memory test tool.\n"); 1769#endif 1770 } 1771 } 1772} 1773#endif 1774 1775#if DEBUG 1776 1777static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) 1778{ 1779 int i, size; 1780 char *realobj; 1781 1782 if (cachep->flags & SLAB_RED_ZONE) { 1783 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n", 1784 *dbg_redzone1(cachep, objp), 1785 *dbg_redzone2(cachep, objp)); 1786 } 1787 1788 if (cachep->flags & SLAB_STORE_USER) { 1789 printk(KERN_ERR "Last user: [<%p>]", 1790 *dbg_userword(cachep, objp)); 1791 print_symbol("(%s)", 1792 (unsigned long)*dbg_userword(cachep, objp)); 1793 printk("\n"); 1794 } 1795 realobj = (char *)objp + obj_offset(cachep); 1796 size = obj_size(cachep); 1797 for (i = 0; i < size && lines; i += 16, lines--) { 1798 int limit; 1799 limit = 16; 1800 if (i + limit > size) 1801 limit = size - i; 1802 dump_line(realobj, i, limit); 1803 } 1804} 1805 1806static void check_poison_obj(struct kmem_cache *cachep, void *objp) 1807{ 1808 char *realobj; 1809 int size, i; 1810 int lines = 0; 1811 1812 realobj = (char *)objp + obj_offset(cachep); 1813 size = obj_size(cachep); 1814 1815 for (i = 0; i < size; i++) { 1816 char exp = POISON_FREE; 1817 if (i == size - 1) 1818 exp = POISON_END; 1819 if (realobj[i] != exp) { 1820 int limit; 1821 /* Mismatch ! */ 1822 /* Print header */ 1823 if (lines == 0) { 1824 printk(KERN_ERR 1825 "Slab corruption: %s start=%p, len=%d\n", 1826 cachep->name, realobj, size); 1827 print_objinfo(cachep, objp, 0); 1828 } 1829 /* Hexdump the affected line */ 1830 i = (i / 16) * 16; 1831 limit = 16; 1832 if (i + limit > size) 1833 limit = size - i; 1834 dump_line(realobj, i, limit); 1835 i += 16; 1836 lines++; 1837 /* Limit to 5 lines */ 1838 if (lines > 5) 1839 break; 1840 } 1841 } 1842 if (lines != 0) { 1843 /* Print some data about the neighboring objects, if they 1844 * exist: 1845 */ 1846 struct slab *slabp = virt_to_slab(objp); 1847 unsigned int objnr; 1848 1849 objnr = obj_to_index(cachep, slabp, objp); 1850 if (objnr) { 1851 objp = index_to_obj(cachep, slabp, objnr - 1); 1852 realobj = (char *)objp + obj_offset(cachep); 1853 printk(KERN_ERR "Prev obj: start=%p, len=%d\n", 1854 realobj, size); 1855 print_objinfo(cachep, objp, 2); 1856 } 1857 if (objnr + 1 < cachep->num) { 1858 objp = index_to_obj(cachep, slabp, objnr + 1); 1859 realobj = (char *)objp + obj_offset(cachep); 1860 printk(KERN_ERR "Next obj: start=%p, len=%d\n", 1861 realobj, size); 1862 print_objinfo(cachep, objp, 2); 1863 } 1864 } 1865} 1866#endif 1867 1868#if DEBUG 1869/** 1870 * slab_destroy_objs - destroy a slab and its objects 1871 * @cachep: cache pointer being destroyed 1872 * @slabp: slab pointer being destroyed 1873 * 1874 * Call the registered destructor for each object in a slab that is being 1875 * destroyed. 
1876 */ 1877static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1878{ 1879 int i; 1880 for (i = 0; i < cachep->num; i++) { 1881 void *objp = index_to_obj(cachep, slabp, i); 1882 1883 if (cachep->flags & SLAB_POISON) { 1884#ifdef CONFIG_DEBUG_PAGEALLOC 1885 if (cachep->buffer_size % PAGE_SIZE == 0 && 1886 OFF_SLAB(cachep)) 1887 kernel_map_pages(virt_to_page(objp), 1888 cachep->buffer_size / PAGE_SIZE, 1); 1889 else 1890 check_poison_obj(cachep, objp); 1891#else 1892 check_poison_obj(cachep, objp); 1893#endif 1894 } 1895 if (cachep->flags & SLAB_RED_ZONE) { 1896 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 1897 slab_error(cachep, "start of a freed object " 1898 "was overwritten"); 1899 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 1900 slab_error(cachep, "end of a freed object " 1901 "was overwritten"); 1902 } 1903 } 1904} 1905#else 1906static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1907{ 1908} 1909#endif 1910 1911/** 1912 * slab_destroy - destroy and release all objects in a slab 1913 * @cachep: cache pointer being destroyed 1914 * @slabp: slab pointer being destroyed 1915 * 1916 * Destroy all the objs in a slab, and release the mem back to the system. 1917 * Before calling the slab must have been unlinked from the cache. The 1918 * cache-lock is not held/needed. 1919 */ 1920static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) 1921{ 1922 void *addr = slabp->s_mem - slabp->colouroff; 1923 1924 slab_destroy_objs(cachep, slabp); 1925 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { 1926 struct slab_rcu *slab_rcu; 1927 1928 slab_rcu = (struct slab_rcu *)slabp; 1929 slab_rcu->cachep = cachep; 1930 slab_rcu->addr = addr; 1931 call_rcu(&slab_rcu->head, kmem_rcu_free); 1932 } else { 1933 kmem_freepages(cachep, addr); 1934 if (OFF_SLAB(cachep)) 1935 kmem_cache_free(cachep->slabp_cache, slabp); 1936 } 1937} 1938 1939/* 1940 * For setting up all the kmem_list3s for cache whose buffer_size is same as 1941 * size of kmem_list3. 1942 */ 1943static void __init set_up_list3s(struct kmem_cache *cachep, int index) 1944{ 1945 int node; 1946 1947 for_each_node_state(node, N_NORMAL_MEMORY) { 1948 cachep->nodelists[node] = &initkmem_list3[index + node]; 1949 cachep->nodelists[node]->next_reap = jiffies + 1950 REAPTIMEOUT_LIST3 + 1951 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 1952 } 1953} 1954 1955static void __kmem_cache_destroy(struct kmem_cache *cachep) 1956{ 1957 int i; 1958 struct kmem_list3 *l3; 1959 1960 for_each_online_cpu(i) 1961 kfree(cachep->array[i]); 1962 1963 /* NUMA: free the list3 structures */ 1964 for_each_online_node(i) { 1965 l3 = cachep->nodelists[i]; 1966 if (l3) { 1967 kfree(l3->shared); 1968 free_alien_cache(l3->alien); 1969 kfree(l3); 1970 } 1971 } 1972 kmem_cache_free(&cache_cache, cachep); 1973} 1974 1975 1976/** 1977 * calculate_slab_order - calculate size (page order) of slabs 1978 * @cachep: pointer to the cache that is being created 1979 * @size: size of objects to be created in this cache. 1980 * @align: required alignment for the objects. 1981 * @flags: slab allocation flags 1982 * 1983 * Also calculates the number of objects per slab. 1984 * 1985 * This could be made much more intelligent. For now, try to avoid using 1986 * high order pages for slabs. When the gfp() functions are more friendly 1987 * towards high-order requests, this should be changed. 
1988 */ 1989static size_t calculate_slab_order(struct kmem_cache *cachep, 1990 size_t size, size_t align, unsigned long flags) 1991{ 1992 unsigned long offslab_limit; 1993 size_t left_over = 0; 1994 int gfporder; 1995 1996 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 1997 unsigned int num; 1998 size_t remainder; 1999 2000 cache_estimate(gfporder, size, align, flags, &remainder, &num); 2001 if (!num) 2002 continue; 2003 2004 if (flags & CFLGS_OFF_SLAB) { 2005 /* 2006 * Max number of objs-per-slab for caches which 2007 * use off-slab slabs. Needed to avoid a possible 2008 * looping condition in cache_grow(). 2009 */ 2010 offslab_limit = size - sizeof(struct slab); 2011 offslab_limit /= sizeof(kmem_bufctl_t); 2012 2013 if (num > offslab_limit) 2014 break; 2015 } 2016 2017 /* Found something acceptable - save it away */ 2018 cachep->num = num; 2019 cachep->gfporder = gfporder; 2020 left_over = remainder; 2021 2022 /* 2023 * A VFS-reclaimable slab tends to have most allocations 2024 * as GFP_NOFS and we really don't want to have to be allocating 2025 * higher-order pages when we are unable to shrink dcache. 2026 */ 2027 if (flags & SLAB_RECLAIM_ACCOUNT) 2028 break; 2029 2030 /* 2031 * Large number of objects is good, but very large slabs are 2032 * currently bad for the gfp()s. 2033 */ 2034 if (gfporder >= slab_break_gfp_order) 2035 break; 2036 2037 /* 2038 * Acceptable internal fragmentation? 2039 */ 2040 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 2041 break; 2042 } 2043 return left_over; 2044} 2045 2046static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) 2047{ 2048 if (g_cpucache_up == FULL) 2049 return enable_cpucache(cachep); 2050 2051 if (g_cpucache_up == NONE) { 2052 /* 2053 * Note: the first kmem_cache_create must create the cache 2054 * that's used by kmalloc(24), otherwise the creation of 2055 * further caches will BUG(). 2056 */ 2057 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2058 2059 /* 2060 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is 2061 * the first cache, then we need to set up all its list3s, 2062 * otherwise the creation of further caches will BUG(). 2063 */ 2064 set_up_list3s(cachep, SIZE_AC); 2065 if (INDEX_AC == INDEX_L3) 2066 g_cpucache_up = PARTIAL_L3; 2067 else 2068 g_cpucache_up = PARTIAL_AC; 2069 } else { 2070 cachep->array[smp_processor_id()] = 2071 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 2072 2073 if (g_cpucache_up == PARTIAL_AC) { 2074 set_up_list3s(cachep, SIZE_L3); 2075 g_cpucache_up = PARTIAL_L3; 2076 } else { 2077 int node; 2078 for_each_node_state(node, N_NORMAL_MEMORY) { 2079 cachep->nodelists[node] = 2080 kmalloc_node(sizeof(struct kmem_list3), 2081 GFP_KERNEL, node); 2082 BUG_ON(!cachep->nodelists[node]); 2083 kmem_list3_init(cachep->nodelists[node]); 2084 } 2085 } 2086 } 2087 cachep->nodelists[numa_node_id()]->next_reap = 2088 jiffies + REAPTIMEOUT_LIST3 + 2089 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2090 2091 cpu_cache_get(cachep)->avail = 0; 2092 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2093 cpu_cache_get(cachep)->batchcount = 1; 2094 cpu_cache_get(cachep)->touched = 0; 2095 cachep->batchcount = 1; 2096 cachep->limit = BOOT_CPUCACHE_ENTRIES; 2097 return 0; 2098} 2099 2100/** 2101 * kmem_cache_create - Create a cache. 2102 * @name: A string which is used in /proc/slabinfo to identify this cache. 2103 * @size: The size of objects to be created in this cache. 2104 * @align: The required alignment for the objects. 
2105 * @flags: SLAB flags 2106 * @ctor: A constructor for the objects. 2107 * 2108 * Returns a ptr to the cache on success, NULL on failure. 2109 * Cannot be called within a int, but can be interrupted. 2110 * The @ctor is run when new pages are allocated by the cache. 2111 * 2112 * @name must be valid until the cache is destroyed. This implies that 2113 * the module calling this has to destroy the cache before getting unloaded. 2114 * 2115 * The flags are 2116 * 2117 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 2118 * to catch references to uninitialised memory. 2119 * 2120 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 2121 * for buffer overruns. 2122 * 2123 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 2124 * cacheline. This can be beneficial if you're counting cycles as closely 2125 * as davem. 2126 */ 2127struct kmem_cache * 2128kmem_cache_create (const char *name, size_t size, size_t align, 2129 unsigned long flags, 2130 void (*ctor)(void*, struct kmem_cache *, unsigned long)) 2131{ 2132 size_t left_over, slab_size, ralign; 2133 struct kmem_cache *cachep = NULL, *pc; 2134 2135 /* 2136 * Sanity checks... these are all serious usage bugs. 2137 */ 2138 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2139 size > KMALLOC_MAX_SIZE) { 2140 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2141 name); 2142 BUG(); 2143 } 2144 2145 /* 2146 * We use cache_chain_mutex to ensure a consistent view of 2147 * cpu_online_map as well. Please see cpuup_callback 2148 */ 2149 mutex_lock(&cache_chain_mutex); 2150 2151 list_for_each_entry(pc, &cache_chain, next) { 2152 char tmp; 2153 int res; 2154 2155 /* 2156 * This happens when the module gets unloaded and doesn't 2157 * destroy its slab cache and no-one else reuses the vmalloc 2158 * area of the module. Print a warning. 2159 */ 2160 res = probe_kernel_address(pc->name, tmp); 2161 if (res) { 2162 printk(KERN_ERR 2163 "SLAB: cache with size %d has lost its name\n", 2164 pc->buffer_size); 2165 continue; 2166 } 2167 2168 if (!strcmp(pc->name, name)) { 2169 printk(KERN_ERR 2170 "kmem_cache_create: duplicate cache %s\n", name); 2171 dump_stack(); 2172 goto oops; 2173 } 2174 } 2175 2176#if DEBUG 2177 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 2178#if FORCED_DEBUG 2179 /* 2180 * Enable redzoning and last user accounting, except for caches with 2181 * large objects, if the increased size would increase the object size 2182 * above the next power of two: caches with object sizes just above a 2183 * power of two have a significant amount of internal fragmentation. 2184 */ 2185 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 2186 2 * sizeof(unsigned long long))) 2187 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2188 if (!(flags & SLAB_DESTROY_BY_RCU)) 2189 flags |= SLAB_POISON; 2190#endif 2191 if (flags & SLAB_DESTROY_BY_RCU) 2192 BUG_ON(flags & SLAB_POISON); 2193#endif 2194 /* 2195 * Always checks flags, a caller might be expecting debug support which 2196 * isn't available. 2197 */ 2198 BUG_ON(flags & ~CREATE_MASK); 2199 2200 /* 2201 * Check that size is in terms of words. This is needed to avoid 2202 * unaligned accesses for some archs when redzoning is used, and makes 2203 * sure any on-slab bufctl's are also correctly aligned. 
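 *
 * For example, on a 64-bit arch where BYTES_PER_WORD is 8, a requested
 * object size of 13 bytes is rounded up to 16 below.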
2204 */ 2205 if (size & (BYTES_PER_WORD - 1)) { 2206 size += (BYTES_PER_WORD - 1); 2207 size &= ~(BYTES_PER_WORD - 1); 2208 } 2209 2210 /* calculate the final buffer alignment: */ 2211 2212 /* 1) arch recommendation: can be overridden for debug */ 2213 if (flags & SLAB_HWCACHE_ALIGN) { 2214 /* 2215 * Default alignment: as specified by the arch code. Except if 2216 * an object is really small, then squeeze multiple objects into 2217 * one cacheline. 2218 */ 2219 ralign = cache_line_size(); 2220 while (size <= ralign / 2) 2221 ralign /= 2; 2222 } else { 2223 ralign = BYTES_PER_WORD; 2224 } 2225 2226 /* 2227 * Redzoning and user store require word alignment or possibly larger. 2228 * Note this will be overridden by architecture or caller mandated 2229 * alignment if either is greater than BYTES_PER_WORD. 2230 */ 2231 if (flags & SLAB_STORE_USER) 2232 ralign = BYTES_PER_WORD; 2233 2234 if (flags & SLAB_RED_ZONE) { 2235 ralign = REDZONE_ALIGN; 2236 /* If redzoning, ensure that the second redzone is suitably 2237 * aligned, by adjusting the object size accordingly. */ 2238 size += REDZONE_ALIGN - 1; 2239 size &= ~(REDZONE_ALIGN - 1); 2240 } 2241 2242 /* 2) arch mandated alignment */ 2243 if (ralign < ARCH_SLAB_MINALIGN) { 2244 ralign = ARCH_SLAB_MINALIGN; 2245 } 2246 /* 3) caller mandated alignment */ 2247 if (ralign < align) { 2248 ralign = align; 2249 } 2250 /* disable debug if necessary */ 2251 if (ralign > __alignof__(unsigned long long)) 2252 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2253 /* 2254 * 4) Store it. 2255 */ 2256 align = ralign; 2257 2258 /* Get cache's description obj. */ 2259 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); 2260 if (!cachep) 2261 goto oops; 2262 2263#if DEBUG 2264 cachep->obj_size = size; 2265 2266 /* 2267 * Both debugging options require word-alignment which is calculated 2268 * into align above. 2269 */ 2270 if (flags & SLAB_RED_ZONE) { 2271 /* add space for red zone words */ 2272 cachep->obj_offset += sizeof(unsigned long long); 2273 size += 2 * sizeof(unsigned long long); 2274 } 2275 if (flags & SLAB_STORE_USER) { 2276 /* user store requires one word storage behind the end of 2277 * the real object. But if the second red zone needs to be 2278 * aligned to 64 bits, we must allow that much space. 2279 */ 2280 if (flags & SLAB_RED_ZONE) 2281 size += REDZONE_ALIGN; 2282 else 2283 size += BYTES_PER_WORD; 2284 } 2285#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2286 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2287 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { 2288 cachep->obj_offset += PAGE_SIZE - size; 2289 size = PAGE_SIZE; 2290 } 2291#endif 2292#endif 2293 2294 /* 2295 * Determine if the slab management is 'on' or 'off' slab. 2296 * (bootstrapping cannot cope with offslab caches so don't do 2297 * it too early on.) 2298 */ 2299 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) 2300 /* 2301 * Size is large, assume best to place the slab management obj 2302 * off-slab (should allow better packing of objs). 
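 * (With 4 KiB pages the PAGE_SIZE >> 3 threshold above works out to
 * 512 bytes, so only caches with fairly large objects take this path,
 * and never while slab_early_init is still set.)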
2303 */ 2304 flags |= CFLGS_OFF_SLAB; 2305 2306 size = ALIGN(size, align); 2307 2308 left_over = calculate_slab_order(cachep, size, align, flags); 2309 2310 if (!cachep->num) { 2311 printk(KERN_ERR 2312 "kmem_cache_create: couldn't create cache %s.\n", name); 2313 kmem_cache_free(&cache_cache, cachep); 2314 cachep = NULL; 2315 goto oops; 2316 } 2317 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2318 + sizeof(struct slab), align); 2319 2320 /* 2321 * If the slab has been placed off-slab, and we have enough space then 2322 * move it on-slab. This is at the expense of any extra colouring. 2323 */ 2324 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2325 flags &= ~CFLGS_OFF_SLAB; 2326 left_over -= slab_size; 2327 } 2328 2329 if (flags & CFLGS_OFF_SLAB) { 2330 /* really off slab. No need for manual alignment */ 2331 slab_size = 2332 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); 2333 } 2334 2335 cachep->colour_off = cache_line_size(); 2336 /* Offset must be a multiple of the alignment. */ 2337 if (cachep->colour_off < align) 2338 cachep->colour_off = align; 2339 cachep->colour = left_over / cachep->colour_off; 2340 cachep->slab_size = slab_size; 2341 cachep->flags = flags; 2342 cachep->gfpflags = 0; 2343 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) 2344 cachep->gfpflags |= GFP_DMA; 2345 cachep->buffer_size = size; 2346 cachep->reciprocal_buffer_size = reciprocal_value(size); 2347 2348 if (flags & CFLGS_OFF_SLAB) { 2349 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2350 /* 2351 * This is a possibility for one of the malloc_sizes caches. 2352 * But since we go off slab only for object size greater than 2353 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, 2354 * this should not happen at all. 2355 * But leave a BUG_ON for some lucky dude. 
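 * In other words, the BUG_ON below should be unreachable: by the time a
 * cache goes off-slab, kmem_find_general_cachep() must already know a
 * general cache large enough to hold its slab descriptor.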
2356 */ 2357 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); 2358 } 2359 cachep->ctor = ctor; 2360 cachep->name = name; 2361 2362 if (setup_cpu_cache(cachep)) { 2363 __kmem_cache_destroy(cachep); 2364 cachep = NULL; 2365 goto oops; 2366 } 2367 2368 /* cache setup completed, link it into the list */ 2369 list_add(&cachep->next, &cache_chain); 2370oops: 2371 if (!cachep && (flags & SLAB_PANIC)) 2372 panic("kmem_cache_create(): failed to create slab `%s'\n", 2373 name); 2374 mutex_unlock(&cache_chain_mutex); 2375 return cachep; 2376} 2377EXPORT_SYMBOL(kmem_cache_create); 2378 2379#if DEBUG 2380static void check_irq_off(void) 2381{ 2382 BUG_ON(!irqs_disabled()); 2383} 2384 2385static void check_irq_on(void) 2386{ 2387 BUG_ON(irqs_disabled()); 2388} 2389 2390static void check_spinlock_acquired(struct kmem_cache *cachep) 2391{ 2392#ifdef CONFIG_SMP 2393 check_irq_off(); 2394 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2395#endif 2396} 2397 2398static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2399{ 2400#ifdef CONFIG_SMP 2401 check_irq_off(); 2402 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2403#endif 2404} 2405 2406#else 2407#define check_irq_off() do { } while(0) 2408#define check_irq_on() do { } while(0) 2409#define check_spinlock_acquired(x) do { } while(0) 2410#define check_spinlock_acquired_node(x, y) do { } while(0) 2411#endif 2412 2413static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2414 struct array_cache *ac, 2415 int force, int node); 2416 2417static void do_drain(void *arg) 2418{ 2419 struct kmem_cache *cachep = arg; 2420 struct array_cache *ac; 2421 int node = numa_node_id(); 2422 2423 check_irq_off(); 2424 ac = cpu_cache_get(cachep); 2425 spin_lock(&cachep->nodelists[node]->list_lock); 2426 free_block(cachep, ac->entry, ac->avail, node); 2427 spin_unlock(&cachep->nodelists[node]->list_lock); 2428 ac->avail = 0; 2429} 2430 2431static void drain_cpu_caches(struct kmem_cache *cachep) 2432{ 2433 struct kmem_list3 *l3; 2434 int node; 2435 2436 on_each_cpu(do_drain, cachep, 1, 1); 2437 check_irq_on(); 2438 for_each_online_node(node) { 2439 l3 = cachep->nodelists[node]; 2440 if (l3 && l3->alien) 2441 drain_alien_cache(cachep, l3->alien); 2442 } 2443 2444 for_each_online_node(node) { 2445 l3 = cachep->nodelists[node]; 2446 if (l3) 2447 drain_array(cachep, l3, l3->shared, 1, node); 2448 } 2449} 2450 2451/* 2452 * Remove slabs from the list of free slabs. 2453 * Specify the number of slabs to drain in tofree. 2454 * 2455 * Returns the actual number of slabs released. 2456 */ 2457static int drain_freelist(struct kmem_cache *cache, 2458 struct kmem_list3 *l3, int tofree) 2459{ 2460 struct list_head *p; 2461 int nr_freed; 2462 struct slab *slabp; 2463 2464 nr_freed = 0; 2465 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { 2466 2467 spin_lock_irq(&l3->list_lock); 2468 p = l3->slabs_free.prev; 2469 if (p == &l3->slabs_free) { 2470 spin_unlock_irq(&l3->list_lock); 2471 goto out; 2472 } 2473 2474 slabp = list_entry(p, struct slab, list); 2475#if DEBUG 2476 BUG_ON(slabp->inuse); 2477#endif 2478 list_del(&slabp->list); 2479 /* 2480 * Safe to drop the lock. The slab is no longer linked 2481 * to the cache. 
2482 */ 2483 l3->free_objects -= cache->num; 2484 spin_unlock_irq(&l3->list_lock); 2485 slab_destroy(cache, slabp); 2486 nr_freed++; 2487 } 2488out: 2489 return nr_freed; 2490} 2491 2492/* Called with cache_chain_mutex held to protect against cpu hotplug */ 2493static int __cache_shrink(struct kmem_cache *cachep) 2494{ 2495 int ret = 0, i = 0; 2496 struct kmem_list3 *l3; 2497 2498 drain_cpu_caches(cachep); 2499 2500 check_irq_on(); 2501 for_each_online_node(i) { 2502 l3 = cachep->nodelists[i]; 2503 if (!l3) 2504 continue; 2505 2506 drain_freelist(cachep, l3, l3->free_objects); 2507 2508 ret += !list_empty(&l3->slabs_full) || 2509 !list_empty(&l3->slabs_partial); 2510 } 2511 return (ret ? 1 : 0); 2512} 2513 2514/** 2515 * kmem_cache_shrink - Shrink a cache. 2516 * @cachep: The cache to shrink. 2517 * 2518 * Releases as many slabs as possible for a cache. 2519 * To help debugging, a zero exit status indicates all slabs were released. 2520 */ 2521int kmem_cache_shrink(struct kmem_cache *cachep) 2522{ 2523 int ret; 2524 BUG_ON(!cachep || in_interrupt()); 2525 2526 mutex_lock(&cache_chain_mutex); 2527 ret = __cache_shrink(cachep); 2528 mutex_unlock(&cache_chain_mutex); 2529 return ret; 2530} 2531EXPORT_SYMBOL(kmem_cache_shrink); 2532 2533/** 2534 * kmem_cache_destroy - delete a cache 2535 * @cachep: the cache to destroy 2536 * 2537 * Remove a &struct kmem_cache object from the slab cache. 2538 * 2539 * It is expected this function will be called by a module when it is 2540 * unloaded. This will remove the cache completely, and avoid a duplicate 2541 * cache being allocated each time a module is loaded and unloaded, if the 2542 * module doesn't have persistent in-kernel storage across loads and unloads. 2543 * 2544 * The cache must be empty before calling this function. 2545 * 2546 * The caller must guarantee that noone will allocate memory from the cache 2547 * during the kmem_cache_destroy(). 2548 */ 2549void kmem_cache_destroy(struct kmem_cache *cachep) 2550{ 2551 BUG_ON(!cachep || in_interrupt()); 2552 2553 /* Find the cache in the chain of caches. */ 2554 mutex_lock(&cache_chain_mutex); 2555 /* 2556 * the chain is never empty, cache_cache is never destroyed 2557 */ 2558 list_del(&cachep->next); 2559 if (__cache_shrink(cachep)) { 2560 slab_error(cachep, "Can't free all objects"); 2561 list_add(&cachep->next, &cache_chain); 2562 mutex_unlock(&cache_chain_mutex); 2563 return; 2564 } 2565 2566 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2567 synchronize_rcu(); 2568 2569 __kmem_cache_destroy(cachep); 2570 mutex_unlock(&cache_chain_mutex); 2571} 2572EXPORT_SYMBOL(kmem_cache_destroy); 2573 2574/* 2575 * Get the memory for a slab management obj. 2576 * For a slab cache when the slab descriptor is off-slab, slab descriptors 2577 * always come from malloc_sizes caches. The slab descriptor cannot 2578 * come from the same cache which is getting created because, 2579 * when we are searching for an appropriate cache for these 2580 * descriptors in kmem_cache_create, we search through the malloc_sizes array. 2581 * If we are creating a malloc_sizes cache here it would not be visible to 2582 * kmem_find_general_cachep till the initialization is complete. 2583 * Hence we cannot have slabp_cache same as the original cache. 2584 */ 2585static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, 2586 int colour_off, gfp_t local_flags, 2587 int nodeid) 2588{ 2589 struct slab *slabp; 2590 2591 if (OFF_SLAB(cachep)) { 2592 /* Slab management obj is off-slab. 
*/ 2593 slabp = kmem_cache_alloc_node(cachep->slabp_cache, 2594 local_flags & ~GFP_THISNODE, nodeid); 2595 if (!slabp) 2596 return NULL; 2597 } else { 2598 slabp = objp + colour_off; 2599 colour_off += cachep->slab_size; 2600 } 2601 slabp->inuse = 0; 2602 slabp->colouroff = colour_off; 2603 slabp->s_mem = objp + colour_off; 2604 slabp->nodeid = nodeid; 2605 return slabp; 2606} 2607 2608static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2609{ 2610 return (kmem_bufctl_t *) (slabp + 1); 2611} 2612 2613static void cache_init_objs(struct kmem_cache *cachep, 2614 struct slab *slabp) 2615{ 2616 int i; 2617 2618 for (i = 0; i < cachep->num; i++) { 2619 void *objp = index_to_obj(cachep, slabp, i); 2620#if DEBUG 2621 /* need to poison the objs? */ 2622 if (cachep->flags & SLAB_POISON) 2623 poison_obj(cachep, objp, POISON_FREE); 2624 if (cachep->flags & SLAB_STORE_USER) 2625 *dbg_userword(cachep, objp) = NULL; 2626 2627 if (cachep->flags & SLAB_RED_ZONE) { 2628 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2629 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2630 } 2631 /* 2632 * Constructors are not allowed to allocate memory from the same 2633 * cache which they are a constructor for. Otherwise, deadlock. 2634 * They must also be threaded. 2635 */ 2636 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2637 cachep->ctor(objp + obj_offset(cachep), cachep, 2638 0); 2639 2640 if (cachep->flags & SLAB_RED_ZONE) { 2641 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2642 slab_error(cachep, "constructor overwrote the" 2643 " end of an object"); 2644 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2645 slab_error(cachep, "constructor overwrote the" 2646 " start of an object"); 2647 } 2648 if ((cachep->buffer_size % PAGE_SIZE) == 0 && 2649 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2650 kernel_map_pages(virt_to_page(objp), 2651 cachep->buffer_size / PAGE_SIZE, 0); 2652#else 2653 if (cachep->ctor) 2654 cachep->ctor(objp, cachep, 0); 2655#endif 2656 slab_bufctl(slabp)[i] = i + 1; 2657 } 2658 slab_bufctl(slabp)[i - 1] = BUFCTL_END; 2659 slabp->free = 0; 2660} 2661 2662static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2663{ 2664 if (CONFIG_ZONE_DMA_FLAG) { 2665 if (flags & GFP_DMA) 2666 BUG_ON(!(cachep->gfpflags & GFP_DMA)); 2667 else 2668 BUG_ON(cachep->gfpflags & GFP_DMA); 2669 } 2670} 2671 2672static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2673 int nodeid) 2674{ 2675 void *objp = index_to_obj(cachep, slabp, slabp->free); 2676 kmem_bufctl_t next; 2677 2678 slabp->inuse++; 2679 next = slab_bufctl(slabp)[slabp->free]; 2680#if DEBUG 2681 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2682 WARN_ON(slabp->nodeid != nodeid); 2683#endif 2684 slabp->free = next; 2685 2686 return objp; 2687} 2688 2689static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2690 void *objp, int nodeid) 2691{ 2692 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2693 2694#if DEBUG 2695 /* Verify that the slab belongs to the intended node */ 2696 WARN_ON(slabp->nodeid != nodeid); 2697 2698 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2699 printk(KERN_ERR "slab: double free detected in cache " 2700 "'%s', objp %p\n", cachep->name, objp); 2701 BUG(); 2702 } 2703#endif 2704 slab_bufctl(slabp)[objnr] = slabp->free; 2705 slabp->free = objnr; 2706 slabp->inuse--; 2707} 2708 2709/* 2710 * Map pages beginning at addr to the given cache and slab. 
This is required 2711 * for the slab allocator to be able to lookup the cache and slab of a 2712 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 2713 */ 2714static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2715 void *addr) 2716{ 2717 int nr_pages; 2718 struct page *page; 2719 2720 page = virt_to_page(addr); 2721 2722 nr_pages = 1; 2723 if (likely(!PageCompound(page))) 2724 nr_pages <<= cache->gfporder; 2725 2726 do { 2727 page_set_cache(page, cache); 2728 page_set_slab(page, slab); 2729 page++; 2730 } while (--nr_pages); 2731} 2732 2733/* 2734 * Grow (by 1) the number of slabs within a cache. This is called by 2735 * kmem_cache_alloc() when there are no active objs left in a cache. 2736 */ 2737static int cache_grow(struct kmem_cache *cachep, 2738 gfp_t flags, int nodeid, void *objp) 2739{ 2740 struct slab *slabp; 2741 size_t offset; 2742 gfp_t local_flags; 2743 struct kmem_list3 *l3; 2744 2745 /* 2746 * Be lazy and only check for valid flags here, keeping it out of the 2747 * critical path in kmem_cache_alloc(). 2748 */ 2749 BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK)); 2750 2751 local_flags = (flags & GFP_LEVEL_MASK); 2752 /* Take the l3 list lock to change the colour_next on this node */ 2753 check_irq_off(); 2754 l3 = cachep->nodelists[nodeid]; 2755 spin_lock(&l3->list_lock); 2756 2757 /* Get colour for the slab, and cal the next value. */ 2758 offset = l3->colour_next; 2759 l3->colour_next++; 2760 if (l3->colour_next >= cachep->colour) 2761 l3->colour_next = 0; 2762 spin_unlock(&l3->list_lock); 2763 2764 offset *= cachep->colour_off; 2765 2766 if (local_flags & __GFP_WAIT) 2767 local_irq_enable(); 2768 2769 /* 2770 * The test for missing atomic flag is performed here, rather than 2771 * the more obvious place, simply to reduce the critical path length 2772 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2773 * will eventually be caught here (where it matters). 2774 */ 2775 kmem_flagcheck(cachep, flags); 2776 2777 /* 2778 * Get mem for the objs. Attempt to allocate a physical page from 2779 * 'nodeid'. 2780 */ 2781 if (!objp) 2782 objp = kmem_getpages(cachep, local_flags, nodeid); 2783 if (!objp) 2784 goto failed; 2785 2786 /* Get slab management. */ 2787 slabp = alloc_slabmgmt(cachep, objp, offset, 2788 local_flags & ~GFP_THISNODE, nodeid); 2789 if (!slabp) 2790 goto opps1; 2791 2792 slabp->nodeid = nodeid; 2793 slab_map_pages(cachep, slabp, objp); 2794 2795 cache_init_objs(cachep, slabp); 2796 2797 if (local_flags & __GFP_WAIT) 2798 local_irq_disable(); 2799 check_irq_off(); 2800 spin_lock(&l3->list_lock); 2801 2802 /* Make slab active. */ 2803 list_add_tail(&slabp->list, &(l3->slabs_free)); 2804 STATS_INC_GROWN(cachep); 2805 l3->free_objects += cachep->num; 2806 spin_unlock(&l3->list_lock); 2807 return 1; 2808opps1: 2809 kmem_freepages(cachep, objp); 2810failed: 2811 if (local_flags & __GFP_WAIT) 2812 local_irq_disable(); 2813 return 0; 2814} 2815 2816#if DEBUG 2817 2818/* 2819 * Perform extra freeing checks: 2820 * - detect bad pointers. 
2821 * - POISON/RED_ZONE checking 2822 */ 2823static void kfree_debugcheck(const void *objp) 2824{ 2825 if (!virt_addr_valid(objp)) { 2826 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2827 (unsigned long)objp); 2828 BUG(); 2829 } 2830} 2831 2832static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2833{ 2834 unsigned long long redzone1, redzone2; 2835 2836 redzone1 = *dbg_redzone1(cache, obj); 2837 redzone2 = *dbg_redzone2(cache, obj); 2838 2839 /* 2840 * Redzone is ok. 2841 */ 2842 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2843 return; 2844 2845 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2846 slab_error(cache, "double free detected"); 2847 else 2848 slab_error(cache, "memory outside object was overwritten"); 2849 2850 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", 2851 obj, redzone1, redzone2); 2852} 2853 2854static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2855 void *caller) 2856{ 2857 struct page *page; 2858 unsigned int objnr; 2859 struct slab *slabp; 2860 2861 objp -= obj_offset(cachep); 2862 kfree_debugcheck(objp); 2863 page = virt_to_head_page(objp); 2864 2865 slabp = page_get_slab(page); 2866 2867 if (cachep->flags & SLAB_RED_ZONE) { 2868 verify_redzone_free(cachep, objp); 2869 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2870 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2871 } 2872 if (cachep->flags & SLAB_STORE_USER) 2873 *dbg_userword(cachep, objp) = caller; 2874 2875 objnr = obj_to_index(cachep, slabp, objp); 2876 2877 BUG_ON(objnr >= cachep->num); 2878 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2879 2880#ifdef CONFIG_DEBUG_SLAB_LEAK 2881 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2882#endif 2883 if (cachep->flags & SLAB_POISON) { 2884#ifdef CONFIG_DEBUG_PAGEALLOC 2885 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2886 store_stackinfo(cachep, objp, (unsigned long)caller); 2887 kernel_map_pages(virt_to_page(objp), 2888 cachep->buffer_size / PAGE_SIZE, 0); 2889 } else { 2890 poison_obj(cachep, objp, POISON_FREE); 2891 } 2892#else 2893 poison_obj(cachep, objp, POISON_FREE); 2894#endif 2895 } 2896 return objp; 2897} 2898 2899static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) 2900{ 2901 kmem_bufctl_t i; 2902 int entries = 0; 2903 2904 /* Check slab's freelist to see if this obj is there. */ 2905 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2906 entries++; 2907 if (entries > cachep->num || i >= cachep->num) 2908 goto bad; 2909 } 2910 if (entries != cachep->num - slabp->inuse) { 2911bad: 2912 printk(KERN_ERR "slab: Internal list corruption detected in " 2913 "cache '%s'(%d), slabp %p(%d). 
Hexdump:\n", 2914 cachep->name, cachep->num, slabp, slabp->inuse); 2915 for (i = 0; 2916 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); 2917 i++) { 2918 if (i % 16 == 0) 2919 printk("\n%03x:", i); 2920 printk(" %02x", ((unsigned char *)slabp)[i]); 2921 } 2922 printk("\n"); 2923 BUG(); 2924 } 2925} 2926#else 2927#define kfree_debugcheck(x) do { } while(0) 2928#define cache_free_debugcheck(x,objp,z) (objp) 2929#define check_slabp(x,y) do { } while(0) 2930#endif 2931 2932static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2933{ 2934 int batchcount; 2935 struct kmem_list3 *l3; 2936 struct array_cache *ac; 2937 int node; 2938 2939 node = numa_node_id(); 2940 2941 check_irq_off(); 2942 ac = cpu_cache_get(cachep); 2943retry: 2944 batchcount = ac->batchcount; 2945 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2946 /* 2947 * If there was little recent activity on this cache, then 2948 * perform only a partial refill. Otherwise we could generate 2949 * refill bouncing. 2950 */ 2951 batchcount = BATCHREFILL_LIMIT; 2952 } 2953 l3 = cachep->nodelists[node]; 2954 2955 BUG_ON(ac->avail > 0 || !l3); 2956 spin_lock(&l3->list_lock); 2957 2958 /* See if we can refill from the shared array */ 2959 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) 2960 goto alloc_done; 2961 2962 while (batchcount > 0) { 2963 struct list_head *entry; 2964 struct slab *slabp; 2965 /* Get slab alloc is to come from. */ 2966 entry = l3->slabs_partial.next; 2967 if (entry == &l3->slabs_partial) { 2968 l3->free_touched = 1; 2969 entry = l3->slabs_free.next; 2970 if (entry == &l3->slabs_free) 2971 goto must_grow; 2972 } 2973 2974 slabp = list_entry(entry, struct slab, list); 2975 check_slabp(cachep, slabp); 2976 check_spinlock_acquired(cachep); 2977 2978 /* 2979 * The slab was either on partial or free list so 2980 * there must be at least one object available for 2981 * allocation. 2982 */ 2983 BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num); 2984 2985 while (slabp->inuse < cachep->num && batchcount--) { 2986 STATS_INC_ALLOCED(cachep); 2987 STATS_INC_ACTIVE(cachep); 2988 STATS_SET_HIGH(cachep); 2989 2990 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, 2991 node); 2992 } 2993 check_slabp(cachep, slabp); 2994 2995 /* move slabp to correct slabp list: */ 2996 list_del(&slabp->list); 2997 if (slabp->free == BUFCTL_END) 2998 list_add(&slabp->list, &l3->slabs_full); 2999 else 3000 list_add(&slabp->list, &l3->slabs_partial); 3001 } 3002 3003must_grow: 3004 l3->free_objects -= ac->avail; 3005alloc_done: 3006 spin_unlock(&l3->list_lock); 3007 3008 if (unlikely(!ac->avail)) { 3009 int x; 3010 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); 3011 3012 /* cache_grow can reenable interrupts, then ac could change. */ 3013 ac = cpu_cache_get(cachep); 3014 if (!x && ac->avail == 0) /* no objects in sight? abort */ 3015 return NULL; 3016 3017 if (!ac->avail) /* objects refilled by interrupt? 
*/ 3018 goto retry; 3019 } 3020 ac->touched = 1; 3021 return ac->entry[--ac->avail]; 3022} 3023 3024static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 3025 gfp_t flags) 3026{ 3027 might_sleep_if(flags & __GFP_WAIT); 3028#if DEBUG 3029 kmem_flagcheck(cachep, flags); 3030#endif 3031} 3032 3033#if DEBUG 3034static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3035 gfp_t flags, void *objp, void *caller) 3036{ 3037 if (!objp) 3038 return objp; 3039 if (cachep->flags & SLAB_POISON) { 3040#ifdef CONFIG_DEBUG_PAGEALLOC 3041 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 3042 kernel_map_pages(virt_to_page(objp), 3043 cachep->buffer_size / PAGE_SIZE, 1); 3044 else 3045 check_poison_obj(cachep, objp); 3046#else 3047 check_poison_obj(cachep, objp); 3048#endif 3049 poison_obj(cachep, objp, POISON_INUSE); 3050 } 3051 if (cachep->flags & SLAB_STORE_USER) 3052 *dbg_userword(cachep, objp) = caller; 3053 3054 if (cachep->flags & SLAB_RED_ZONE) { 3055 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3056 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3057 slab_error(cachep, "double free, or memory outside" 3058 " object was overwritten"); 3059 printk(KERN_ERR 3060 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 3061 objp, *dbg_redzone1(cachep, objp), 3062 *dbg_redzone2(cachep, objp)); 3063 } 3064 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3065 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3066 } 3067#ifdef CONFIG_DEBUG_SLAB_LEAK 3068 { 3069 struct slab *slabp; 3070 unsigned objnr; 3071 3072 slabp = page_get_slab(virt_to_head_page(objp)); 3073 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; 3074 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; 3075 } 3076#endif 3077 objp += obj_offset(cachep); 3078 if (cachep->ctor && cachep->flags & SLAB_POISON) 3079 cachep->ctor(objp, cachep, 0); 3080#if ARCH_SLAB_MINALIGN 3081 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3082 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3083 objp, ARCH_SLAB_MINALIGN); 3084 } 3085#endif 3086 return objp; 3087} 3088#else 3089#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3090#endif 3091 3092#ifdef CONFIG_FAILSLAB 3093 3094static struct failslab_attr { 3095 3096 struct fault_attr attr; 3097 3098 u32 ignore_gfp_wait; 3099#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3100 struct dentry *ignore_gfp_wait_file; 3101#endif 3102 3103} failslab = { 3104 .attr = FAULT_ATTR_INITIALIZER, 3105 .ignore_gfp_wait = 1, 3106}; 3107 3108static int __init setup_failslab(char *str) 3109{ 3110 return setup_fault_attr(&failslab.attr, str); 3111} 3112__setup("failslab=", setup_failslab); 3113 3114static int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3115{ 3116 if (cachep == &cache_cache) 3117 return 0; 3118 if (flags & __GFP_NOFAIL) 3119 return 0; 3120 if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT)) 3121 return 0; 3122 3123 return should_fail(&failslab.attr, obj_size(cachep)); 3124} 3125 3126#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3127 3128static int __init failslab_debugfs(void) 3129{ 3130 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 3131 struct dentry *dir; 3132 int err; 3133 3134 err = init_fault_attr_dentries(&failslab.attr, "failslab"); 3135 if (err) 3136 return err; 3137 dir = failslab.attr.dentries.dir; 3138 3139 failslab.ignore_gfp_wait_file = 3140 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3141 &failslab.ignore_gfp_wait); 3142 3143 if (!failslab.ignore_gfp_wait_file) { 3144 err = -ENOMEM; 3145 debugfs_remove(failslab.ignore_gfp_wait_file); 3146 
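		/* Tear down whatever init_fault_attr_dentries() created
		 * before reporting the failure. */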
cleanup_fault_attr_dentries(&failslab.attr); 3147 } 3148 3149 return err; 3150} 3151 3152late_initcall(failslab_debugfs); 3153 3154#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3155 3156#else /* CONFIG_FAILSLAB */ 3157 3158static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3159{ 3160 return 0; 3161} 3162 3163#endif /* CONFIG_FAILSLAB */ 3164 3165static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3166{ 3167 void *objp; 3168 struct array_cache *ac; 3169 3170 check_irq_off(); 3171 3172 ac = cpu_cache_get(cachep); 3173 if (likely(ac->avail)) { 3174 STATS_INC_ALLOCHIT(cachep); 3175 ac->touched = 1; 3176 objp = ac->entry[--ac->avail]; 3177 } else { 3178 STATS_INC_ALLOCMISS(cachep); 3179 objp = cache_alloc_refill(cachep, flags); 3180 } 3181 return objp; 3182} 3183 3184#ifdef CONFIG_NUMA 3185/* 3186 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. 3187 * 3188 * If we are in_interrupt, then process context, including cpusets and 3189 * mempolicy, may not apply and should not be used for allocation policy. 3190 */ 3191static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3192{ 3193 int nid_alloc, nid_here; 3194 3195 if (in_interrupt() || (flags & __GFP_THISNODE)) 3196 return NULL; 3197 nid_alloc = nid_here = numa_node_id(); 3198 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3199 nid_alloc = cpuset_mem_spread_node(); 3200 else if (current->mempolicy) 3201 nid_alloc = slab_node(current->mempolicy); 3202 if (nid_alloc != nid_here) 3203 return ____cache_alloc_node(cachep, flags, nid_alloc); 3204 return NULL; 3205} 3206 3207/* 3208 * Fallback function if there was no memory available and no objects on a 3209 * certain node and fall back is permitted. First we scan all the 3210 * available nodelists for available objects. If that fails then we 3211 * perform an allocation without specifying a node. This allows the page 3212 * allocator to do its reclaim / fallback magic. We then insert the 3213 * slab into the proper nodelist and then allocate from it. 3214 */ 3215static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3216{ 3217 struct zonelist *zonelist; 3218 gfp_t local_flags; 3219 struct zone **z; 3220 void *obj = NULL; 3221 int nid; 3222 3223 if (flags & __GFP_THISNODE) 3224 return NULL; 3225 3226 zonelist = &NODE_DATA(slab_node(current->mempolicy)) 3227 ->node_zonelists[gfp_zone(flags)]; 3228 local_flags = (flags & GFP_LEVEL_MASK); 3229 3230retry: 3231 /* 3232 * Look through allowed nodes for objects available 3233 * from existing per node queues. 3234 */ 3235 for (z = zonelist->zones; *z && !obj; z++) { 3236 nid = zone_to_nid(*z); 3237 3238 if (cpuset_zone_allowed_hardwall(*z, flags) && 3239 cache->nodelists[nid] && 3240 cache->nodelists[nid]->free_objects) 3241 obj = ____cache_alloc_node(cache, 3242 flags | GFP_THISNODE, nid); 3243 } 3244 3245 if (!obj) { 3246 /* 3247 * This allocation will be performed within the constraints 3248 * of the current cpuset / memory policy requirements. 3249 * We may trigger various forms of reclaim on the allowed 3250 * set and go into memory reserves if necessary. 
3251 */ 3252 if (local_flags & __GFP_WAIT) 3253 local_irq_enable(); 3254 kmem_flagcheck(cache, flags); 3255 obj = kmem_getpages(cache, flags, -1); 3256 if (local_flags & __GFP_WAIT) 3257 local_irq_disable(); 3258 if (obj) { 3259 /* 3260 * Insert into the appropriate per node queues 3261 */ 3262 nid = page_to_nid(virt_to_page(obj)); 3263 if (cache_grow(cache, flags, nid, obj)) { 3264 obj = ____cache_alloc_node(cache, 3265 flags | GFP_THISNODE, nid); 3266 if (!obj) 3267 /* 3268 * Another processor may allocate the 3269 * objects in the slab since we are 3270 * not holding any locks. 3271 */ 3272 goto retry; 3273 } else { 3274 /* cache_grow already freed obj */ 3275 obj = NULL; 3276 } 3277 } 3278 } 3279 return obj; 3280} 3281 3282/* 3283 * A interface to enable slab creation on nodeid 3284 */ 3285static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3286 int nodeid) 3287{ 3288 struct list_head *entry; 3289 struct slab *slabp; 3290 struct kmem_list3 *l3; 3291 void *obj; 3292 int x; 3293 3294 l3 = cachep->nodelists[nodeid]; 3295 BUG_ON(!l3); 3296 3297retry: 3298 check_irq_off(); 3299 spin_lock(&l3->list_lock); 3300 entry = l3->slabs_partial.next; 3301 if (entry == &l3->slabs_partial) { 3302 l3->free_touched = 1; 3303 entry = l3->slabs_free.next; 3304 if (entry == &l3->slabs_free) 3305 goto must_grow; 3306 } 3307 3308 slabp = list_entry(entry, struct slab, list); 3309 check_spinlock_acquired_node(cachep, nodeid); 3310 check_slabp(cachep, slabp); 3311 3312 STATS_INC_NODEALLOCS(cachep); 3313 STATS_INC_ACTIVE(cachep); 3314 STATS_SET_HIGH(cachep); 3315 3316 BUG_ON(slabp->inuse == cachep->num); 3317 3318 obj = slab_get_obj(cachep, slabp, nodeid); 3319 check_slabp(cachep, slabp); 3320 l3->free_objects--; 3321 /* move slabp to correct slabp list: */ 3322 list_del(&slabp->list); 3323 3324 if (slabp->free == BUFCTL_END) 3325 list_add(&slabp->list, &l3->slabs_full); 3326 else 3327 list_add(&slabp->list, &l3->slabs_partial); 3328 3329 spin_unlock(&l3->list_lock); 3330 goto done; 3331 3332must_grow: 3333 spin_unlock(&l3->list_lock); 3334 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); 3335 if (x) 3336 goto retry; 3337 3338 return fallback_alloc(cachep, flags); 3339 3340done: 3341 return obj; 3342} 3343 3344/** 3345 * kmem_cache_alloc_node - Allocate an object on the specified node 3346 * @cachep: The cache to allocate from. 3347 * @flags: See kmalloc(). 3348 * @nodeid: node number of the target node. 3349 * @caller: return address of caller, used for debug information 3350 * 3351 * Identical to kmem_cache_alloc but it will allocate memory on the given 3352 * node, which can improve the performance for cpu bound structures. 3353 * 3354 * Fallback to other node is possible if __GFP_THISNODE is not set. 3355 */ 3356static __always_inline void * 3357__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3358 void *caller) 3359{ 3360 unsigned long save_flags; 3361 void *ptr; 3362 3363 if (should_failslab(cachep, flags)) 3364 return NULL; 3365 3366 cache_alloc_debugcheck_before(cachep, flags); 3367 local_irq_save(save_flags); 3368 3369 if (unlikely(nodeid == -1)) 3370 nodeid = numa_node_id(); 3371 3372 if (unlikely(!cachep->nodelists[nodeid])) { 3373 /* Node not bootstrapped yet */ 3374 ptr = fallback_alloc(cachep, flags); 3375 goto out; 3376 } 3377 3378 if (nodeid == numa_node_id()) { 3379 /* 3380 * Use the locally cached objects if possible. 3381 * However ____cache_alloc does not allow fallback 3382 * to other nodes. 
It may fail while we still have 3383 * objects on other nodes available. 3384 */ 3385 ptr = ____cache_alloc(cachep, flags); 3386 if (ptr) 3387 goto out; 3388 } 3389 /* ___cache_alloc_node can fall back to other nodes */ 3390 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3391 out: 3392 local_irq_restore(save_flags); 3393 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3394 3395 if (unlikely((flags & __GFP_ZERO) && ptr)) 3396 memset(ptr, 0, obj_size(cachep)); 3397 3398 return ptr; 3399} 3400 3401static __always_inline void * 3402__do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3403{ 3404 void *objp; 3405 3406 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) { 3407 objp = alternate_node_alloc(cache, flags); 3408 if (objp) 3409 goto out; 3410 } 3411 objp = ____cache_alloc(cache, flags); 3412 3413 /* 3414 * We may just have run out of memory on the local node. 3415 * ____cache_alloc_node() knows how to locate memory on other nodes 3416 */ 3417 if (!objp) 3418 objp = ____cache_alloc_node(cache, flags, numa_node_id()); 3419 3420 out: 3421 return objp; 3422} 3423#else 3424 3425static __always_inline void * 3426__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3427{ 3428 return ____cache_alloc(cachep, flags); 3429} 3430 3431#endif /* CONFIG_NUMA */ 3432 3433static __always_inline void * 3434__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) 3435{ 3436 unsigned long save_flags; 3437 void *objp; 3438 3439 if (should_failslab(cachep, flags)) 3440 return NULL; 3441 3442 cache_alloc_debugcheck_before(cachep, flags); 3443 local_irq_save(save_flags); 3444 objp = __do_cache_alloc(cachep, flags); 3445 local_irq_restore(save_flags); 3446 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3447 prefetchw(objp); 3448 3449 if (unlikely((flags & __GFP_ZERO) && objp)) 3450 memset(objp, 0, obj_size(cachep)); 3451 3452 return objp; 3453} 3454 3455/* 3456 * Caller needs to acquire correct kmem_list's list_lock 3457 */ 3458static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 3459 int node) 3460{ 3461 int i; 3462 struct kmem_list3 *l3; 3463 3464 for (i = 0; i < nr_objects; i++) { 3465 void *objp = objpp[i]; 3466 struct slab *slabp; 3467 3468 slabp = virt_to_slab(objp); 3469 l3 = cachep->nodelists[node]; 3470 list_del(&slabp->list); 3471 check_spinlock_acquired_node(cachep, node); 3472 check_slabp(cachep, slabp); 3473 slab_put_obj(cachep, slabp, objp, node); 3474 STATS_DEC_ACTIVE(cachep); 3475 l3->free_objects++; 3476 check_slabp(cachep, slabp); 3477 3478 /* fixup slab chains */ 3479 if (slabp->inuse == 0) { 3480 if (l3->free_objects > l3->free_limit) { 3481 l3->free_objects -= cachep->num; 3482 /* No need to drop any previously held 3483 * lock here, even if we have a off-slab slab 3484 * descriptor it is guaranteed to come from 3485 * a different cache, refer to comments before 3486 * alloc_slabmgmt. 3487 */ 3488 slab_destroy(cachep, slabp); 3489 } else { 3490 list_add(&slabp->list, &l3->slabs_free); 3491 } 3492 } else { 3493 /* Unconditionally move a slab to the end of the 3494 * partial list on free - maximum time for the 3495 * other objects to be freed, too. 
3496 */ 3497 list_add_tail(&slabp->list, &l3->slabs_partial); 3498 } 3499 } 3500} 3501 3502static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3503{ 3504 int batchcount; 3505 struct kmem_list3 *l3; 3506 int node = numa_node_id(); 3507 3508 batchcount = ac->batchcount; 3509#if DEBUG 3510 BUG_ON(!batchcount || batchcount > ac->avail); 3511#endif 3512 check_irq_off(); 3513 l3 = cachep->nodelists[node]; 3514 spin_lock(&l3->list_lock); 3515 if (l3->shared) { 3516 struct array_cache *shared_array = l3->shared; 3517 int max = shared_array->limit - shared_array->avail; 3518 if (max) { 3519 if (batchcount > max) 3520 batchcount = max; 3521 memcpy(&(shared_array->entry[shared_array->avail]), 3522 ac->entry, sizeof(void *) * batchcount); 3523 shared_array->avail += batchcount; 3524 goto free_done; 3525 } 3526 } 3527 3528 free_block(cachep, ac->entry, batchcount, node); 3529free_done: 3530#if STATS 3531 { 3532 int i = 0; 3533 struct list_head *p; 3534 3535 p = l3->slabs_free.next; 3536 while (p != &(l3->slabs_free)) { 3537 struct slab *slabp; 3538 3539 slabp = list_entry(p, struct slab, list); 3540 BUG_ON(slabp->inuse); 3541 3542 i++; 3543 p = p->next; 3544 } 3545 STATS_SET_FREEABLE(cachep, i); 3546 } 3547#endif 3548 spin_unlock(&l3->list_lock); 3549 ac->avail -= batchcount; 3550 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3551} 3552 3553/* 3554 * Release an obj back to its cache. If the obj has a constructed state, it must 3555 * be in this state _before_ it is released. Called with disabled ints. 3556 */ 3557static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3558{ 3559 struct array_cache *ac = cpu_cache_get(cachep); 3560 3561 check_irq_off(); 3562 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3563 3564 /* 3565 * Skip calling cache_free_alien() when the platform is not numa. 3566 * This will avoid cache misses that happen while accessing slabp (which 3567 * is per page memory reference) to get nodeid. Instead use a global 3568 * variable to skip the call, which is mostly likely to be present in 3569 * the cache. 3570 */ 3571 if (numa_platform && cache_free_alien(cachep, objp)) 3572 return; 3573 3574 if (likely(ac->avail < ac->limit)) { 3575 STATS_INC_FREEHIT(cachep); 3576 ac->entry[ac->avail++] = objp; 3577 return; 3578 } else { 3579 STATS_INC_FREEMISS(cachep); 3580 cache_flusharray(cachep, ac); 3581 ac->entry[ac->avail++] = objp; 3582 } 3583} 3584 3585/** 3586 * kmem_cache_alloc - Allocate an object 3587 * @cachep: The cache to allocate from. 3588 * @flags: See kmalloc(). 3589 * 3590 * Allocate an object from this cache. The flags are only relevant 3591 * if the cache has no available objects. 3592 */ 3593void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3594{ 3595 return __cache_alloc(cachep, flags, __builtin_return_address(0)); 3596} 3597EXPORT_SYMBOL(kmem_cache_alloc); 3598 3599/** 3600 * kmem_ptr_validate - check if an untrusted pointer might 3601 * be a slab entry. 3602 * @cachep: the cache we're checking against 3603 * @ptr: pointer to validate 3604 * 3605 * This verifies that the untrusted pointer looks sane: 3606 * it is _not_ a guarantee that the pointer is actually 3607 * part of the slab cache in question, but it at least 3608 * validates that the pointer can be dereferenced and 3609 * looks half-way sane. 3610 * 3611 * Currently only used for dentry validation. 
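 *
 * Illustrative (hypothetical) caller, with my_cache and ptr standing in
 * for a real cache and an untrusted pointer:
 *
 *	if (!kmem_ptr_validate(my_cache, ptr))
 *		return NULL;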
3612 */ 3613int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) 3614{ 3615 unsigned long addr = (unsigned long)ptr; 3616 unsigned long min_addr = PAGE_OFFSET; 3617 unsigned long align_mask = BYTES_PER_WORD - 1; 3618 unsigned long size = cachep->buffer_size; 3619 struct page *page; 3620 3621 if (unlikely(addr < min_addr)) 3622 goto out; 3623 if (unlikely(addr > (unsigned long)high_memory - size)) 3624 goto out; 3625 if (unlikely(addr & align_mask)) 3626 goto out; 3627 if (unlikely(!kern_addr_valid(addr))) 3628 goto out; 3629 if (unlikely(!kern_addr_valid(addr + size - 1))) 3630 goto out; 3631 page = virt_to_page(ptr); 3632 if (unlikely(!PageSlab(page))) 3633 goto out; 3634 if (unlikely(page_get_cache(page) != cachep)) 3635 goto out; 3636 return 1; 3637out: 3638 return 0; 3639} 3640 3641#ifdef CONFIG_NUMA 3642void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3643{ 3644 return __cache_alloc_node(cachep, flags, nodeid, 3645 __builtin_return_address(0)); 3646} 3647EXPORT_SYMBOL(kmem_cache_alloc_node); 3648 3649static __always_inline void * 3650__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3651{ 3652 struct kmem_cache *cachep; 3653 3654 cachep = kmem_find_general_cachep(size, flags); 3655 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3656 return cachep; 3657 return kmem_cache_alloc_node(cachep, flags, node); 3658} 3659 3660#ifdef CONFIG_DEBUG_SLAB 3661void *__kmalloc_node(size_t size, gfp_t flags, int node) 3662{ 3663 return __do_kmalloc_node(size, flags, node, 3664 __builtin_return_address(0)); 3665} 3666EXPORT_SYMBOL(__kmalloc_node); 3667 3668void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3669 int node, void *caller) 3670{ 3671 return __do_kmalloc_node(size, flags, node, caller); 3672} 3673EXPORT_SYMBOL(__kmalloc_node_track_caller); 3674#else 3675void *__kmalloc_node(size_t size, gfp_t flags, int node) 3676{ 3677 return __do_kmalloc_node(size, flags, node, NULL); 3678} 3679EXPORT_SYMBOL(__kmalloc_node); 3680#endif /* CONFIG_DEBUG_SLAB */ 3681#endif /* CONFIG_NUMA */ 3682 3683/** 3684 * __do_kmalloc - allocate memory 3685 * @size: how many bytes of memory are required. 3686 * @flags: the type of memory to allocate (see kmalloc). 3687 * @caller: function caller for debug tracking of the caller 3688 */ 3689static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3690 void *caller) 3691{ 3692 struct kmem_cache *cachep; 3693 3694 /* If you want to save a few bytes .text space: replace 3695 * __ with kmem_. 3696 * Then kmalloc uses the uninlined functions instead of the inline 3697 * functions. 3698 */ 3699 cachep = __find_general_cachep(size, flags); 3700 if (unlikely(ZERO_OR_NULL_PTR(cachep))) 3701 return cachep; 3702 return __cache_alloc(cachep, flags, caller); 3703} 3704 3705 3706#ifdef CONFIG_DEBUG_SLAB 3707void *__kmalloc(size_t size, gfp_t flags) 3708{ 3709 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3710} 3711EXPORT_SYMBOL(__kmalloc); 3712 3713void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) 3714{ 3715 return __do_kmalloc(size, flags, caller); 3716} 3717EXPORT_SYMBOL(__kmalloc_track_caller); 3718 3719#else 3720void *__kmalloc(size_t size, gfp_t flags) 3721{ 3722 return __do_kmalloc(size, flags, NULL); 3723} 3724EXPORT_SYMBOL(__kmalloc); 3725#endif 3726 3727/** 3728 * kmem_cache_free - Deallocate an object 3729 * @cachep: The cache the allocation was from. 3730 * @objp: The previously allocated object. 
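 *
 * Illustrative pairing (hypothetical cache and object names):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);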
3731 * 3732 * Free an object which was previously allocated from this 3733 * cache. 3734 */ 3735void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3736{ 3737 unsigned long flags; 3738 3739 BUG_ON(virt_to_cache(objp) != cachep); 3740 3741 local_irq_save(flags); 3742 debug_check_no_locks_freed(objp, obj_size(cachep)); 3743 __cache_free(cachep, objp); 3744 local_irq_restore(flags); 3745} 3746EXPORT_SYMBOL(kmem_cache_free); 3747 3748/** 3749 * kfree - free previously allocated memory 3750 * @objp: pointer returned by kmalloc. 3751 * 3752 * If @objp is NULL, no operation is performed. 3753 * 3754 * Don't free memory not originally allocated by kmalloc() 3755 * or you will run into trouble. 3756 */ 3757void kfree(const void *objp) 3758{ 3759 struct kmem_cache *c; 3760 unsigned long flags; 3761 3762 if (unlikely(ZERO_OR_NULL_PTR(objp))) 3763 return; 3764 local_irq_save(flags); 3765 kfree_debugcheck(objp); 3766 c = virt_to_cache(objp); 3767 debug_check_no_locks_freed(objp, obj_size(c)); 3768 __cache_free(c, (void *)objp); 3769 local_irq_restore(flags); 3770} 3771EXPORT_SYMBOL(kfree); 3772 3773unsigned int kmem_cache_size(struct kmem_cache *cachep) 3774{ 3775 return obj_size(cachep); 3776} 3777EXPORT_SYMBOL(kmem_cache_size); 3778 3779const char *kmem_cache_name(struct kmem_cache *cachep) 3780{ 3781 return cachep->name; 3782} 3783EXPORT_SYMBOL_GPL(kmem_cache_name); 3784 3785/* 3786 * This initializes kmem_list3 or resizes varioius caches for all nodes. 3787 */ 3788static int alloc_kmemlist(struct kmem_cache *cachep) 3789{ 3790 int node; 3791 struct kmem_list3 *l3; 3792 struct array_cache *new_shared; 3793 struct array_cache **new_alien = NULL; 3794 3795 for_each_node_state(node, N_NORMAL_MEMORY) { 3796 3797 if (use_alien_caches) { 3798 new_alien = alloc_alien_cache(node, cachep->limit); 3799 if (!new_alien) 3800 goto fail; 3801 } 3802 3803 new_shared = NULL; 3804 if (cachep->shared) { 3805 new_shared = alloc_arraycache(node, 3806 cachep->shared*cachep->batchcount, 3807 0xbaadf00d); 3808 if (!new_shared) { 3809 free_alien_cache(new_alien); 3810 goto fail; 3811 } 3812 } 3813 3814 l3 = cachep->nodelists[node]; 3815 if (l3) { 3816 struct array_cache *shared = l3->shared; 3817 3818 spin_lock_irq(&l3->list_lock); 3819 3820 if (shared) 3821 free_block(cachep, shared->entry, 3822 shared->avail, node); 3823 3824 l3->shared = new_shared; 3825 if (!l3->alien) { 3826 l3->alien = new_alien; 3827 new_alien = NULL; 3828 } 3829 l3->free_limit = (1 + nr_cpus_node(node)) * 3830 cachep->batchcount + cachep->num; 3831 spin_unlock_irq(&l3->list_lock); 3832 kfree(shared); 3833 free_alien_cache(new_alien); 3834 continue; 3835 } 3836 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); 3837 if (!l3) { 3838 free_alien_cache(new_alien); 3839 kfree(new_shared); 3840 goto fail; 3841 } 3842 3843 kmem_list3_init(l3); 3844 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3845 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3846 l3->shared = new_shared; 3847 l3->alien = new_alien; 3848 l3->free_limit = (1 + nr_cpus_node(node)) * 3849 cachep->batchcount + cachep->num; 3850 cachep->nodelists[node] = l3; 3851 } 3852 return 0; 3853 3854fail: 3855 if (!cachep->next.next) { 3856 /* Cache is not active yet. 
Roll back what we did */ 3857 node--; 3858 while (node >= 0) { 3859 if (cachep->nodelists[node]) { 3860 l3 = cachep->nodelists[node]; 3861 3862 kfree(l3->shared); 3863 free_alien_cache(l3->alien); 3864 kfree(l3); 3865 cachep->nodelists[node] = NULL; 3866 } 3867 node--; 3868 } 3869 } 3870 return -ENOMEM; 3871} 3872 3873struct ccupdate_struct { 3874 struct kmem_cache *cachep; 3875 struct array_cache *new[NR_CPUS]; 3876}; 3877 3878static void do_ccupdate_local(void *info) 3879{ 3880 struct ccupdate_struct *new = info; 3881 struct array_cache *old; 3882 3883 check_irq_off(); 3884 old = cpu_cache_get(new->cachep); 3885 3886 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3887 new->new[smp_processor_id()] = old; 3888} 3889 3890/* Always called with the cache_chain_mutex held */ 3891static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3892 int batchcount, int shared) 3893{ 3894 struct ccupdate_struct *new; 3895 int i; 3896 3897 new = kzalloc(sizeof(*new), GFP_KERNEL); 3898 if (!new) 3899 return -ENOMEM; 3900 3901 for_each_online_cpu(i) { 3902 new->new[i] = alloc_arraycache(cpu_to_node(i), limit, 3903 batchcount); 3904 if (!new->new[i]) { 3905 for (i--; i >= 0; i--) 3906 kfree(new->new[i]); 3907 kfree(new); 3908 return -ENOMEM; 3909 } 3910 } 3911 new->cachep = cachep; 3912 3913 on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); 3914 3915 check_irq_on(); 3916 cachep->batchcount = batchcount; 3917 cachep->limit = limit; 3918 cachep->shared = shared; 3919 3920 for_each_online_cpu(i) { 3921 struct array_cache *ccold = new->new[i]; 3922 if (!ccold) 3923 continue; 3924 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3925 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 3926 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3927 kfree(ccold); 3928 } 3929 kfree(new); 3930 return alloc_kmemlist(cachep); 3931} 3932 3933/* Called with cache_chain_mutex held always */ 3934static int enable_cpucache(struct kmem_cache *cachep) 3935{ 3936 int err; 3937 int limit, shared; 3938 3939 /* 3940 * The head array serves three purposes: 3941 * - create a LIFO ordering, i.e. return objects that are cache-warm 3942 * - reduce the number of spinlock operations. 3943 * - reduce the number of linked list operations on the slab and 3944 * bufctl chains: array operations are cheaper. 3945 * The numbers are guessed, we should auto-tune as described by 3946 * Bonwick. 3947 */ 3948 if (cachep->buffer_size > 131072) 3949 limit = 1; 3950 else if (cachep->buffer_size > PAGE_SIZE) 3951 limit = 8; 3952 else if (cachep->buffer_size > 1024) 3953 limit = 24; 3954 else if (cachep->buffer_size > 256) 3955 limit = 54; 3956 else 3957 limit = 120; 3958 3959 /* 3960 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 3961 * allocation behaviour: Most allocs on one cpu, most free operations 3962 * on another cpu. For these cases, an efficient object passing between 3963 * cpus is necessary. This is provided by a shared array. The array 3964 * replaces Bonwick's magazine layer. 3965 * On uniprocessor, it's functionally equivalent (but less efficient) 3966 * to a larger limit. Thus disabled by default. 3967 */ 3968 shared = 0; 3969 if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1) 3970 shared = 8; 3971 3972#if DEBUG 3973 /* 3974 * With debugging enabled, large batchcount lead to excessively long 3975 * periods with disabled local interrupts. 
Limit the batchcount 3976 */ 3977 if (limit > 32) 3978 limit = 32; 3979#endif 3980 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared); 3981 if (err) 3982 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", 3983 cachep->name, -err); 3984 return err; 3985} 3986 3987/* 3988 * Drain an array if it contains any elements taking the l3 lock only if 3989 * necessary. Note that the l3 listlock also protects the array_cache 3990 * if drain_array() is used on the shared array. 3991 */ 3992void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 3993 struct array_cache *ac, int force, int node) 3994{ 3995 int tofree; 3996 3997 if (!ac || !ac->avail) 3998 return; 3999 if (ac->touched && !force) { 4000 ac->touched = 0; 4001 } else { 4002 spin_lock_irq(&l3->list_lock); 4003 if (ac->avail) { 4004 tofree = force ? ac->avail : (ac->limit + 4) / 5; 4005 if (tofree > ac->avail) 4006 tofree = (ac->avail + 1) / 2; 4007 free_block(cachep, ac->entry, tofree, node); 4008 ac->avail -= tofree; 4009 memmove(ac->entry, &(ac->entry[tofree]), 4010 sizeof(void *) * ac->avail); 4011 } 4012 spin_unlock_irq(&l3->list_lock); 4013 } 4014} 4015 4016/** 4017 * cache_reap - Reclaim memory from caches. 4018 * @w: work descriptor 4019 * 4020 * Called from workqueue/eventd every few seconds. 4021 * Purpose: 4022 * - clear the per-cpu caches for this CPU. 4023 * - return freeable pages to the main free memory pool. 4024 * 4025 * If we cannot acquire the cache chain mutex then just give up - we'll try 4026 * again on the next iteration. 4027 */ 4028static void cache_reap(struct work_struct *w) 4029{ 4030 struct kmem_cache *searchp; 4031 struct kmem_list3 *l3; 4032 int node = numa_node_id(); 4033 struct delayed_work *work = 4034 container_of(w, struct delayed_work, work); 4035 4036 if (!mutex_trylock(&cache_chain_mutex)) 4037 /* Give up. Setup the next iteration. */ 4038 goto out; 4039 4040 list_for_each_entry(searchp, &cache_chain, next) { 4041 check_irq_on(); 4042 4043 /* 4044 * We only take the l3 lock if absolutely necessary and we 4045 * have established with reasonable certainty that 4046 * we can do some work if the lock was obtained. 4047 */ 4048 l3 = searchp->nodelists[node]; 4049 4050 reap_alien(searchp, l3); 4051 4052 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); 4053 4054 /* 4055 * These are racy checks but it does not matter 4056 * if we skip one check or scan twice. 4057 */ 4058 if (time_after(l3->next_reap, jiffies)) 4059 goto next; 4060 4061 l3->next_reap = jiffies + REAPTIMEOUT_LIST3; 4062 4063 drain_array(searchp, l3, l3->shared, 0, node); 4064 4065 if (l3->free_touched) 4066 l3->free_touched = 0; 4067 else { 4068 int freed; 4069 4070 freed = drain_freelist(searchp, l3, (l3->free_limit + 4071 5 * searchp->num - 1) / (5 * searchp->num)); 4072 STATS_ADD_REAPED(searchp, freed); 4073 } 4074next: 4075 cond_resched(); 4076 } 4077 check_irq_on(); 4078 mutex_unlock(&cache_chain_mutex); 4079 next_reap_node(); 4080out: 4081 /* Set up the next iteration */ 4082 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC)); 4083} 4084 4085#ifdef CONFIG_PROC_FS 4086 4087static void print_slabinfo_header(struct seq_file *m) 4088{ 4089 /* 4090 * Output format version, so at least we can change it 4091 * without _too_ many complaints. 
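 *
 * A made-up sample line in the 2.1 format, for a hypothetical cache of
 * 128-byte objects, matching the header printed below:
 *	foo_cache 2890 3000 128 30 1 : tunables 120 60 8 : slabdata 100 100 0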
4092 */ 4093#if STATS 4094 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); 4095#else 4096 seq_puts(m, "slabinfo - version: 2.1\n"); 4097#endif 4098 seq_puts(m, "# name <active_objs> <num_objs> <objsize> " 4099 "<objperslab> <pagesperslab>"); 4100 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); 4101 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); 4102#if STATS 4103 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " 4104 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); 4105 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); 4106#endif 4107 seq_putc(m, '\n'); 4108} 4109 4110static void *s_start(struct seq_file *m, loff_t *pos) 4111{ 4112 loff_t n = *pos; 4113 4114 mutex_lock(&cache_chain_mutex); 4115 if (!n) 4116 print_slabinfo_header(m); 4117 4118 return seq_list_start(&cache_chain, *pos); 4119} 4120 4121static void *s_next(struct seq_file *m, void *p, loff_t *pos) 4122{ 4123 return seq_list_next(p, &cache_chain, pos); 4124} 4125 4126static void s_stop(struct seq_file *m, void *p) 4127{ 4128 mutex_unlock(&cache_chain_mutex); 4129} 4130 4131static int s_show(struct seq_file *m, void *p) 4132{ 4133 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next); 4134 struct slab *slabp; 4135 unsigned long active_objs; 4136 unsigned long num_objs; 4137 unsigned long active_slabs = 0; 4138 unsigned long num_slabs, free_objects = 0, shared_avail = 0; 4139 const char *name; 4140 char *error = NULL; 4141 int node; 4142 struct kmem_list3 *l3; 4143 4144 active_objs = 0; 4145 num_slabs = 0; 4146 for_each_online_node(node) { 4147 l3 = cachep->nodelists[node]; 4148 if (!l3) 4149 continue; 4150 4151 check_irq_on(); 4152 spin_lock_irq(&l3->list_lock); 4153 4154 list_for_each_entry(slabp, &l3->slabs_full, list) { 4155 if (slabp->inuse != cachep->num && !error) 4156 error = "slabs_full accounting error"; 4157 active_objs += cachep->num; 4158 active_slabs++; 4159 } 4160 list_for_each_entry(slabp, &l3->slabs_partial, list) { 4161 if (slabp->inuse == cachep->num && !error) 4162 error = "slabs_partial inuse accounting error"; 4163 if (!slabp->inuse && !error) 4164 error = "slabs_partial/inuse accounting error"; 4165 active_objs += slabp->inuse; 4166 active_slabs++; 4167 } 4168 list_for_each_entry(slabp, &l3->slabs_free, list) { 4169 if (slabp->inuse && !error) 4170 error = "slabs_free/inuse accounting error"; 4171 num_slabs++; 4172 } 4173 free_objects += l3->free_objects; 4174 if (l3->shared) 4175 shared_avail += l3->shared->avail; 4176 4177 spin_unlock_irq(&l3->list_lock); 4178 } 4179 num_slabs += active_slabs; 4180 num_objs = num_slabs * cachep->num; 4181 if (num_objs - active_objs != free_objects && !error) 4182 error = "free_objects accounting error"; 4183 4184 name = cachep->name; 4185 if (error) 4186 printk(KERN_ERR "slab: cache %s error: %s\n", name, error); 4187 4188 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", 4189 name, active_objs, num_objs, cachep->buffer_size, 4190 cachep->num, (1 << cachep->gfporder)); 4191 seq_printf(m, " : tunables %4u %4u %4u", 4192 cachep->limit, cachep->batchcount, cachep->shared); 4193 seq_printf(m, " : slabdata %6lu %6lu %6lu", 4194 active_slabs, num_slabs, shared_avail); 4195#if STATS 4196 { /* list3 stats */ 4197 unsigned long high = cachep->high_mark; 4198 unsigned long allocs = cachep->num_allocations; 4199 unsigned long grown = cachep->grown; 4200 unsigned long reaped = cachep->reaped; 4201 unsigned long errors = cachep->errors; 4202 unsigned long max_freeable = 
cachep->max_freeable; 4203 unsigned long node_allocs = cachep->node_allocs; 4204 unsigned long node_frees = cachep->node_frees; 4205 unsigned long overflows = cachep->node_overflow; 4206 4207 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ 4208 %4lu %4lu %4lu %4lu %4lu", allocs, high, grown, 4209 reaped, errors, max_freeable, node_allocs, 4210 node_frees, overflows); 4211 } 4212 /* cpu stats */ 4213 { 4214 unsigned long allochit = atomic_read(&cachep->allochit); 4215 unsigned long allocmiss = atomic_read(&cachep->allocmiss); 4216 unsigned long freehit = atomic_read(&cachep->freehit); 4217 unsigned long freemiss = atomic_read(&cachep->freemiss); 4218 4219 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", 4220 allochit, allocmiss, freehit, freemiss); 4221 } 4222#endif 4223 seq_putc(m, '\n'); 4224 return 0; 4225} 4226 4227/* 4228 * slabinfo_op - iterator that generates /proc/slabinfo 4229 * 4230 * Output layout: 4231 * cache-name 4232 * num-active-objs 4233 * total-objs 4234 * object size 4235 * num-active-slabs 4236 * total-slabs 4237 * num-pages-per-slab 4238 * + further values on SMP and with statistics enabled 4239 */ 4240 4241const struct seq_operations slabinfo_op = { 4242 .start = s_start, 4243 .next = s_next, 4244 .stop = s_stop, 4245 .show = s_show, 4246}; 4247 4248#define MAX_SLABINFO_WRITE 128 4249/** 4250 * slabinfo_write - Tuning for the slab allocator 4251 * @file: unused 4252 * @buffer: user buffer 4253 * @count: data length 4254 * @ppos: unused 4255 */ 4256ssize_t slabinfo_write(struct file *file, const char __user * buffer, 4257 size_t count, loff_t *ppos) 4258{ 4259 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; 4260 int limit, batchcount, shared, res; 4261 struct kmem_cache *cachep; 4262 4263 if (count > MAX_SLABINFO_WRITE) 4264 return -EINVAL; 4265 if (copy_from_user(&kbuf, buffer, count)) 4266 return -EFAULT; 4267 kbuf[MAX_SLABINFO_WRITE] = '\0'; 4268 4269 tmp = strchr(kbuf, ' '); 4270 if (!tmp) 4271 return -EINVAL; 4272 *tmp = '\0'; 4273 tmp++; 4274 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) 4275 return -EINVAL; 4276 4277 /* Find the cache in the chain of caches. 
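 * The tunables were parsed above from a line of the form
 * "<cachename> <limit> <batchcount> <shared>"; e.g., for a hypothetical
 * cache, a write such as
 *	echo "foo_cache 120 60 8" > /proc/slabinfo
 * arrives here with kbuf == "foo_cache", limit == 120, batchcount == 60
 * and shared == 8.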
*/ 4278 mutex_lock(&cache_chain_mutex); 4279 res = -EINVAL; 4280 list_for_each_entry(cachep, &cache_chain, next) { 4281 if (!strcmp(cachep->name, kbuf)) { 4282 if (limit < 1 || batchcount < 1 || 4283 batchcount > limit || shared < 0) { 4284 res = 0; 4285 } else { 4286 res = do_tune_cpucache(cachep, limit, 4287 batchcount, shared); 4288 } 4289 break; 4290 } 4291 } 4292 mutex_unlock(&cache_chain_mutex); 4293 if (res >= 0) 4294 res = count; 4295 return res; 4296} 4297 4298#ifdef CONFIG_DEBUG_SLAB_LEAK 4299 4300static void *leaks_start(struct seq_file *m, loff_t *pos) 4301{ 4302 mutex_lock(&cache_chain_mutex); 4303 return seq_list_start(&cache_chain, *pos); 4304} 4305 4306static inline int add_caller(unsigned long *n, unsigned long v) 4307{ 4308 unsigned long *p; 4309 int l; 4310 if (!v) 4311 return 1; 4312 l = n[1]; 4313 p = n + 2; 4314 while (l) { 4315 int i = l/2; 4316 unsigned long *q = p + 2 * i; 4317 if (*q == v) { 4318 q[1]++; 4319 return 1; 4320 } 4321 if (*q > v) { 4322 l = i; 4323 } else { 4324 p = q + 2; 4325 l -= i + 1; 4326 } 4327 } 4328 if (++n[1] == n[0]) 4329 return 0; 4330 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); 4331 p[0] = v; 4332 p[1] = 1; 4333 return 1; 4334} 4335 4336static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) 4337{ 4338 void *p; 4339 int i; 4340 if (n[0] == n[1]) 4341 return; 4342 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) { 4343 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) 4344 continue; 4345 if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) 4346 return; 4347 } 4348} 4349 4350static void show_symbol(struct seq_file *m, unsigned long address) 4351{ 4352#ifdef CONFIG_KALLSYMS 4353 unsigned long offset, size; 4354 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN]; 4355 4356 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) { 4357 seq_printf(m, "%s+%#lx/%#lx", name, offset, size); 4358 if (modname[0]) 4359 seq_printf(m, " [%s]", modname); 4360 return; 4361 } 4362#endif 4363 seq_printf(m, "%p", (void *)address); 4364} 4365 4366static int leaks_show(struct seq_file *m, void *p) 4367{ 4368 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next); 4369 struct slab *slabp; 4370 struct kmem_list3 *l3; 4371 const char *name; 4372 unsigned long *n = m->private; 4373 int node; 4374 int i; 4375 4376 if (!(cachep->flags & SLAB_STORE_USER)) 4377 return 0; 4378 if (!(cachep->flags & SLAB_RED_ZONE)) 4379 return 0; 4380 4381 /* OK, we can do it */ 4382 4383 n[1] = 0; 4384 4385 for_each_online_node(node) { 4386 l3 = cachep->nodelists[node]; 4387 if (!l3) 4388 continue; 4389 4390 check_irq_on(); 4391 spin_lock_irq(&l3->list_lock); 4392 4393 list_for_each_entry(slabp, &l3->slabs_full, list) 4394 handle_slab(n, cachep, slabp); 4395 list_for_each_entry(slabp, &l3->slabs_partial, list) 4396 handle_slab(n, cachep, slabp); 4397 spin_unlock_irq(&l3->list_lock); 4398 } 4399 name = cachep->name; 4400 if (n[0] == n[1]) { 4401 /* Increase the buffer size */ 4402 mutex_unlock(&cache_chain_mutex); 4403 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); 4404 if (!m->private) { 4405 /* Too bad, we are really out */ 4406 m->private = n; 4407 mutex_lock(&cache_chain_mutex); 4408 return -ENOMEM; 4409 } 4410 *(unsigned long *)m->private = n[0] * 2; 4411 kfree(n); 4412 mutex_lock(&cache_chain_mutex); 4413 /* Now make sure this entry will be retried */ 4414 m->count = m->size; 4415 return 0; 4416 } 4417 for (i = 0; i < n[1]; i++) { 4418 seq_printf(m, "%s: %lu ", name, 
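/* number of live objects recorded against this caller by add_caller() */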
n[2*i+3]); 4419 show_symbol(m, n[2*i+2]); 4420 seq_putc(m, '\n'); 4421 } 4422 4423 return 0; 4424} 4425 4426const struct seq_operations slabstats_op = { 4427 .start = leaks_start, 4428 .next = s_next, 4429 .stop = s_stop, 4430 .show = leaks_show, 4431}; 4432#endif 4433#endif 4434 4435/** 4436 * ksize - get the actual amount of memory allocated for a given object 4437 * @objp: Pointer to the object 4438 * 4439 * kmalloc may internally round up allocations and return more memory 4440 * than requested. ksize() can be used to determine the actual amount of 4441 * memory allocated. The caller may use this additional memory, even though 4442 * a smaller amount of memory was initially specified with the kmalloc call. 4443 * The caller must guarantee that objp points to a valid object previously 4444 * allocated with either kmalloc() or kmem_cache_alloc(). The object 4445 * must not be freed during the duration of the call. 4446 */ 4447size_t ksize(const void *objp) 4448{ 4449 BUG_ON(!objp); 4450 if (unlikely(objp == ZERO_SIZE_PTR)) 4451 return 0; 4452 4453 return obj_size(virt_to_cache(objp)); 4454} 4455
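/*
 * Usage sketch (hypothetical caller; "buf" and "usable" are made-up names):
 * kmalloc() may round a request up to the next kmalloc cache size, and
 * ksize() reports that rounded-up size, so the caller may use the whole
 * object, e.g. zero all of it rather than only the 17 bytes requested:
 *
 *	size_t usable;
 *	char *buf = kmalloc(17, GFP_KERNEL);
 *
 *	if (buf) {
 *		usable = ksize(buf);
 *		memset(buf, 0, usable);
 *		kfree(buf);
 *	}
 */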