Lines Matching defs:size

28 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
39 * alignment. Again, objects of page-size or greater are allocated by
40 * calling alloc_pages(). As SLAB objects know their size, no separate
41 * size bookkeeping is necessary and there is essentially no allocation
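These comment fragments describe how SLOB backs kmalloc() and the SLAB API. As a rough illustration of the 4-byte header scheme, inferred from the __do_kmalloc_node() lines at 437-445 below rather than quoted from slob.c, a sub-page kmalloc() block is laid out like this:

/*
 * Illustrative sketch, not in slob.c: the size header lives in the
 * alignment slack just before the pointer handed back to the caller.
 *
 *   |<--- align --->|<-------- size -------->|
 *   [ size ..pad....][ object returned ..... ]
 *   ^m = slob_alloc(size + align, ...)
 *                   ^ret = (void *)m + align
 */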
77 * slob_block has a field 'units', which indicates size of block if +ve,
80 * Free blocks of size 1 unit simply contain the offset of the next block.
81 * Those with larger size contain their size in the first SLOB_UNIT of
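For context, the block descriptor these comments refer to is declared a little below them; modulo version drift, the declaration in mainline reads approximately (the field width depends on PAGE_SIZE):

#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
        slobidx_t units;        /* +ve: block size; -ve: offset of next free block */
};
typedef struct slob_block slob_t;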
125 #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
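DIV_ROUND_UP() is the usual kernel.h macro, so SLOB_UNITS() rounds a byte count up to whole SLOB_UNITs. A worked example, assuming the common 2-byte SLOB_UNIT (i.e. slobidx_t is s16, SLOB_UNIT == sizeof(slob_t) == 2):

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

SLOB_UNITS(1);  /* (1 + 1) / 2 == 1 unit  */
SLOB_UNITS(5);  /* (5 + 1) / 2 == 3 units */
SLOB_UNITS(8);  /* (8 + 1) / 2 == 4 units */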
134 int size;
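Line 134 is the size field of the deferred-free bookkeeping struct; judging from the kmem_rcu_free() lines at 594-596 below, the surrounding struct is simply:

struct slob_rcu {
        struct rcu_head head;
        int size;               /* line 134: object size, kept for the RCU callback */
};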
143 * Encode the given size and next info into a free slob block s.
145 static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
150 if (size > 1) {
151 s[0].units = size;
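Lines 150-151 are the multi-unit branch of set_slob(). Filling in the lines the search skipped (a reconstruction consistent with the encoding comment at lines 77-81, not a verbatim quote), the whole function is approximately:

static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t offset = next - base;

        if (size > 1) {                 /* line 150 */
                s[0].units = size;      /* line 151 */
                s[1].units = offset;
        } else
                s[0].units = -offset;   /* 1-unit block: negated next offset */
}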
158 * Return the size of a slob block.
217 static void *slob_page_alloc(struct page *sp, size_t size, int align)
220 int delta = 0, units = SLOB_UNITS(size);
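The delta initialized at line 220 is how many units the first-fit scan must skip inside a candidate free block to honor align. A sketch of the check inside the scan loop (ALIGN() is the standard kernel macro; cur, aligned and avail are assumed local names):

/* inside the scan over the page's free list: */
if (align) {
        aligned = (slob_t *)ALIGN((unsigned long)cur, align);
        delta = aligned - cur;          /* units wasted to reach alignment */
}
if (avail >= units + delta) {
        /* cur fits: split off the leading delta units, then allocate */
}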
268 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
276 if (size < SLOB_BREAK1)
278 else if (size < SLOB_BREAK2)
295 if (sp->units < SLOB_UNITS(size))
300 b = slob_page_alloc(sp, size, align);
328 b = slob_page_alloc(sp, size, align);
333 memset(b, 0, size);
340 static void slob_free(void *block, int size)
350 BUG_ON(!size);
353 units = SLOB_UNITS(size);
375 if (size < SLOB_BREAK1)
377 else if (size < SLOB_BREAK2)
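The same SLOB_BREAK1/SLOB_BREAK2 tests appear in slob_alloc() (lines 276/278) and slob_free() (lines 375/377): they pick one of three global free lists by size class. A hypothetical helper capturing the selection (the real code open-codes it; the break values and list names shown are the ones usually defined near the top of slob.c):

#define SLOB_BREAK1     256
#define SLOB_BREAK2     1024

static LIST_HEAD(free_slob_small);      /* < 256 bytes  */
static LIST_HEAD(free_slob_medium);     /* < 1024 bytes */
static LIST_HEAD(free_slob_large);      /* the rest     */

static struct list_head *slob_list_for(size_t size)     /* hypothetical */
{
        if (size < SLOB_BREAK1)
                return &free_slob_small;
        else if (size < SLOB_BREAK2)
                return &free_slob_medium;
        return &free_slob_large;
}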
427 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
437 if (size < PAGE_SIZE - align) {
438 if (!size)
441 m = slob_alloc(size + align, gfp, align, node);
445 *m = size;
449 size, size + align, gfp, node);
451 unsigned int order = get_order(size);
458 size, PAGE_SIZE << order, gfp, node);
461 kmemleak_alloc(ret, size, 1, gfp);
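Lines 437-445 implement the 4-byte header promised by the comment at line 28: the requested size is stored in the alignment slack and the caller receives the address just past it. The matching read-back happens in kfree(), which this search did not list because it tests *m rather than size; a condensed sketch of the round trip:

/* allocation (lines 437-445, condensed): */
m = slob_alloc(size + align, gfp, align, node);  /* room for the header */
*m = size;                                       /* record requested size */
ret = (void *)m + align;                         /* hide the header */

/* the inverse in kfree(), reconstructed for illustration: */
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);                        /* free header + object */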
465 void *__kmalloc(size_t size, gfp_t gfp)
467 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
471 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
473 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
477 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
480 return __do_kmalloc_node(size, gfp, node, caller);
529 c->size += sizeof(struct slob_rcu);
543 if (c->size < PAGE_SIZE) {
544 b = slob_alloc(c->size, flags, c->align, node);
546 SLOB_UNITS(c->size) * SLOB_UNIT,
549 b = slob_new_pages(flags, get_order(c->size), node);
551 PAGE_SIZE << get_order(c->size),
558 kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
570 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
572 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
583 static void __kmem_cache_free(void *b, int size)
585 if (size < PAGE_SIZE)
586 slob_free(b, size);
588 slob_free_pages(b, get_order(size));
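__kmem_cache_free() at lines 583-588 undoes the allocation choice made at lines 543-549: both sides branch on whether the object fits below PAGE_SIZE. Condensed side by side (a paraphrase, not verbatim):

/* alloc (lines 543-549): */
b = (c->size < PAGE_SIZE) ? slob_alloc(c->size, flags, c->align, node)
                          : slob_new_pages(flags, get_order(c->size), node);

/* free (lines 585-588): */
if (size < PAGE_SIZE)
        slob_free(b, size);
else
        slob_free_pages(b, get_order(size));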
594 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
596 __kmem_cache_free(b, slob_rcu->size);
604 slob_rcu = b + (c->size - sizeof(struct slob_rcu));
605 slob_rcu->size = c->size;
608 __kmem_cache_free(b, c->size);
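Lines 529, 594-596 and 604-605 form one mechanism: for RCU-freed caches, c->size is grown by sizeof(struct slob_rcu) so a small trailer fits at the end of every object, and the RCU callback walks back from that trailer to the object start. A sketch of the layout, using only names from the listed lines:

/*
 *   |<------------ c->size (grown at line 529) ------------>|
 *   [ object payload ..................][ struct slob_rcu   ]
 *   ^b                                  ^slob_rcu =
 *                                        b + c->size - sizeof(struct slob_rcu)
 *
 * kmem_rcu_free() (line 594) inverts that pointer arithmetic:
 *   b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
 * which works because slob_rcu->size was set to c->size at line 605.
 */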
628 .size = sizeof(struct kmem_cache),