slob.c revision 039ca4e74a1cf60bd7487324a564ecf5c981f254
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because
 * slob_page() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be served from pages residing on the same node,
 * in order to prevent random node placement.
 */
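
/*
 * Illustrative layout of a small kmalloc() block under the scheme described
 * above (a sketch only; the actual offset is max(ARCH_KMALLOC_MINALIGN,
 * ARCH_SLAB_MINALIGN) bytes, of which just the first 4 hold the size):
 *
 *	base                      base + align
 *	+-----------+-------------+----------------------------+
 *	| size (4B) | pad to align| payload returned to caller |
 *	+-----------+-------------+----------------------------+
 *
 * kfree() steps back 'align' bytes from the pointer it is given, reads the
 * stored size and hands the whole block back to slob_free().
 */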

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * is_slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int is_slob_page(struct slob_page *sp)
{
	return PageSlab((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlab((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlab((struct page *)sp);
}

static inline struct slob_page *slob_page(const void *addr)
{
	return (struct slob_page *)virt_to_page(addr);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
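
/*
 * Worked example of the unit arithmetic above, assuming 4K pages (so
 * slobidx_t is s16 and SLOB_UNIT is 2 bytes): SLOB_UNITS(100) rounds a
 * 100-byte request up to 50 units, a 1-byte request still consumes one
 * 2-byte unit, and a completely free page holds SLOB_UNITS(PAGE_SIZE),
 * i.e. 2048 units.
 */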

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
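
/*
 * Worked example for the alignment path in slob_page_alloc() above,
 * assuming 2-byte units: a 32-byte request (16 units) with align = 8 from
 * a free block cur that starts 4 bytes past an 8-byte boundary gives
 * aligned = cur rounded up by 4 bytes, so delta = 2 units. Provided the
 * block has at least 18 free units, set_slob(cur, 2, aligned) splits off
 * a 2-unit free head and the allocation is carved from the aligned
 * remainder.
 */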

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
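
/*
 * Example of the free-block encoding set up for a fresh page in
 * slob_alloc() above, assuming 4K pages and 2-byte units: the call
 * set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)) stores
 * b[0].units = 2048 (the size) and b[1].units = 2048 (the offset of the
 * "next" block, one past the end of the page). That next pointer lands
 * on a page boundary, which is exactly the condition slob_last() uses to
 * report the end of the free list.
 */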

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp, &free_slob_small);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);

void kfree(const void *block)
{
	struct slob_page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);
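
/*
 * Round-trip example for the kmalloc header handling above, assuming 4K
 * pages (2-byte units) and an 8-byte minimum alignment: kmalloc(100)
 * calls slob_alloc(108, ...), stores 100 in the first 4 bytes of the
 * block and returns block + 8. kfree() steps back 8 bytes, reads the
 * stored 100 and calls slob_free() for 108 bytes, while ksize() reports
 * SLOB_UNITS(100) * SLOB_UNIT = 100 usable bytes.
 */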

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
	/* Nothing to do */
}
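
/*
 * Minimal usage sketch of the kmem_cache emulation above, with a
 * hypothetical 'foo' cache shown purely for illustration
 * (kmem_cache_alloc() itself is the slab-header wrapper around
 * kmem_cache_alloc_node()):
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 *
 * Under SLOB every object comes straight from slob_alloc() (or from the
 * page allocator for objects of PAGE_SIZE or more), so there are no
 * per-cache slabs to manage and kmem_cache_shrink() is a no-op.
 */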