Searched refs:size (Results 1 - 25 of 42) sorted by relevance


/mm/
nobootmem.c
35 static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, argument
44 addr = memblock_find_in_range_node(size, align, goal, limit, nid);
48 if (memblock_reserve(addr, size))
52 memset(ptr, 0, size);
57 kmemleak_alloc(ptr, size, 0, 0);
64 * @size: size of the range in bytes
70 void __init free_bootmem_late(unsigned long addr, unsigned long size) argument
74 kmemleak_free_part(__va(addr), size); local
77 end = PFN_DOWN(addr + size);
129 phys_addr_t size; local
201 free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size) argument
216 free_bootmem(unsigned long addr, unsigned long size) argument
221 ___alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
259 __alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal) argument
267 ___alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
295 __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) argument
303 ___alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
330 __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) argument
339 ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
369 __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) argument
378 __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) argument
401 __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal) argument
407 __alloc_bootmem_low_nopanic(unsigned long size, unsigned long align, unsigned long goal) argument
430 __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) argument
[all...]
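Note on the nobootmem.c matches above: the scattered lines 44-57 are the core of __alloc_memory_core_early(). Below is a hedged sketch of that flow, reassembled for readability; the symbols come from the excerpt itself, except the phys_to_virt() translation step, which is assumed here.

static void * __init alloc_core_early_sketch(int nid, u64 size, u64 align,
                                             u64 goal, u64 limit)
{
        u64 addr;
        void *ptr;

        /* find a free physical range of @size bytes, preferring node @nid */
        addr = memblock_find_in_range_node(size, align, goal, limit, nid);
        if (!addr)
                return NULL;

        /* mark the range reserved so later early allocations skip it */
        if (memblock_reserve(addr, size))
                return NULL;

        /* hand back a zeroed mapping and let kmemleak track the block */
        ptr = phys_to_virt(addr);               /* assumed translation step */
        memset(ptr, 0, size);
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}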
maccess.c
12 * @size: size of the data chunk
18 long __weak probe_kernel_read(void *dst, const void *src, size_t size)
21 long __probe_kernel_read(void *dst, const void *src, size_t size) argument
29 (__force const void __user *)src, size);
41 * @size: size of the data chunk
46 long __weak probe_kernel_write(void *dst, const void *src, size_t size)
49 long __probe_kernel_write(void *dst, const void *src, size_t size) argument
56 ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
[all...]
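Note on the maccess.c matches above: probe_kernel_read() copies from a kernel address that may not be mapped, returning an error instead of faulting. A hedged usage sketch (kernel context assumed; peek_word() is illustrative):

static int peek_word(const void *addr, unsigned long *out)
{
        unsigned long val;

        /* returns 0 on success, -EFAULT if the source address faults */
        if (probe_kernel_read(&val, addr, sizeof(val)))
                return -EFAULT;

        *out = val;
        return 0;
}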
early_ioremap.c
97 __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) argument
116 __func__, (u64)phys_addr, size))
119 /* Don't allow wraparound or zero size */
120 last_addr = phys_addr + size - 1;
121 if (WARN_ON(!size || last_addr < phys_addr))
124 prev_size[slot] = size;
130 size = PAGE_ALIGN(last_addr + 1) - phys_addr;
135 nrpages = size >> PAGE_SHIFT;
153 __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);
159 void __init early_iounmap(void __iomem *addr, unsigned long size) argument
208 early_ioremap(resource_size_t phys_addr, unsigned long size) argument
215 early_memremap(resource_size_t phys_addr, unsigned long size) argument
223 early_ioremap(resource_size_t phys_addr, unsigned long size) argument
230 early_memremap(resource_size_t phys_addr, unsigned long size) argument
235 early_iounmap(void __iomem *addr, unsigned long size) argument
242 early_memunmap(void *addr, unsigned long size) argument
[all...]
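Note on the early_ioremap.c matches above: lines 119-135 validate the request and round it to whole pages. A minimal standalone restatement of that arithmetic (4 KiB pages assumed; not the kernel code itself):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       ((uint64_t)1 << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Reject zero size and address wraparound, then work out how many whole
 * pages must be mapped to cover [phys_addr, phys_addr + size - 1]. */
static int pages_to_map(uint64_t phys_addr, uint64_t size, uint64_t *nrpages)
{
        uint64_t last_addr = phys_addr + size - 1;

        if (!size || last_addr < phys_addr)
                return -1;                      /* zero size or wraparound */

        phys_addr &= PAGE_MASK;                 /* align the start down */
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;
        *nrpages = size >> PAGE_SHIFT;
        return 0;
}

int main(void)
{
        uint64_t n;

        if (!pages_to_map(0x1ff8, 0x10, &n))    /* 16 bytes straddling a page */
                printf("%llu page(s)\n", (unsigned long long)n);        /* 2 */
        return 0;
}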
memblock.c
73 /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
74 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) argument
76 return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
89 phys_addr_t base, phys_addr_t size)
95 phys_addr_t rgnsize = type->regions[i].size;
96 if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
107 * @size
88 memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) argument
117 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align, int nid) argument
149 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align, int nid) argument
191 memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid) argument
251 memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align) argument
465 memblock_insert_region(struct memblock_type *type, int idx, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
498 memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
577 memblock_add_node(phys_addr_t base, phys_addr_t size, int nid) argument
583 memblock_add(phys_addr_t base, phys_addr_t size) argument
605 memblock_isolate_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int *start_rgn, int *end_rgn) argument
665 memblock_remove_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size) argument
680 memblock_remove(phys_addr_t base, phys_addr_t size) argument
686 memblock_free(phys_addr_t base, phys_addr_t size) argument
693 kmemleak_free_part(__va(base), size); local
697 memblock_reserve_region(phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
712 memblock_reserve(phys_addr_t base, phys_addr_t size) argument
727 memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) argument
753 memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) argument
1022 memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid) argument
1040 memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid) argument
1055 kmemleak_alloc(__va(found), size, 0, 0); local
1061 memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end) argument
1067 memblock_alloc_base_nid(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr, int nid) argument
1074 memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) argument
1079 __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) argument
1084 memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) argument
1097 memblock_alloc(phys_addr_t size, phys_addr_t align) argument
1102 memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) argument
1138 memblock_virt_alloc_internal( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) argument
1219 memblock_virt_alloc_try_nid_nopanic( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) argument
1249 memblock_virt_alloc_try_nid( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) argument
1278 __memblock_free_early(phys_addr_t base, phys_addr_t size) argument
1283 kmemleak_free_part(__va(base), size); local
1296 __memblock_free_late(phys_addr_t base, phys_addr_t size) argument
1303 kmemleak_free_part(__va(base), size); local
1431 memblock_is_region_memory(phys_addr_t base, phys_addr_t size) argument
1453 memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) argument
1496 unsigned long long base, size; local
[all...]
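Note on the memblock.c matches above: memblock_cap_size() (line 74) clamps a requested size so that base + size cannot wrap past the end of the physical address space. A minimal standalone restatement of that clamp:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Shrink *size so that base + *size cannot overflow phys_addr_t, mirroring
 * the min(*size, ULLONG_MAX - base) expression quoted above. */
static phys_addr_t cap_size(phys_addr_t base, phys_addr_t *size)
{
        phys_addr_t room = UINT64_MAX - base;

        if (*size > room)
                *size = room;
        return *size;
}

int main(void)
{
        phys_addr_t size = 0x200000;                    /* 2 MiB request ... */
        phys_addr_t base = UINT64_MAX - 0xfffff;        /* ... 1 MiB below the top */

        printf("capped to %#llx bytes\n",
               (unsigned long long)cap_size(base, &size));      /* 0xfffff */
        return 0;
}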
slab_common.c
62 * Determine the size of a slab object
71 static int kmem_cache_sanity_check(const char *name, size_t size) argument
75 if (!name || in_interrupt() || size < sizeof(void *) ||
76 size > KMALLOC_MAX_SIZE) {
92 pr_err("Slab cache with size %d has lost its name\n",
102 static inline int kmem_cache_sanity_check(const char *name, size_t size) argument
112 size_t size; local
118 size = offsetof(struct memcg_cache_params, memcg_caches);
119 size += memcg_limited_groups_array_size * sizeof(void *);
121 size
143 int size; local
227 find_mergeable(size_t size, size_t align, unsigned long flags, const char *name, void (*ctor)(void *)) argument
275 calculate_alignment(unsigned long flags, unsigned long align, unsigned long size) argument
299 do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align, unsigned long flags, void (*ctor)(void *), struct mem_cgroup *memcg, struct kmem_cache *root_cache) argument
363 kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) argument
571 create_boot_cache(struct kmem_cache *s, const char *name, size_t size, unsigned long flags) argument
588 create_kmalloc_cache(const char *name, size_t size, unsigned long flags) argument
652 kmalloc_slab(size_t size, gfp_t flags) argument
765 int size = kmalloc_size(i); local
783 kmalloc_order(size_t size, gfp_t flags, unsigned int order) argument
797 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) argument
[all...]
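Note on the slab_common.c matches above: kmem_cache_sanity_check() (line 71) only accepts named caches whose object size is between sizeof(void *) and KMALLOC_MAX_SIZE. A hedged usage sketch for the kmem_cache_create() signature at line 363 (kernel context assumed; struct foo and the cache name are illustrative):

struct foo {
        int id;
        void *payload;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
        /* size must be >= sizeof(void *) and <= KMALLOC_MAX_SIZE */
        foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                       0, SLAB_HWCACHE_ALIGN, NULL);
        return foo_cachep ? 0 : -ENOMEM;
}

static struct foo *foo_get(gfp_t gfp)
{
        return kmem_cache_alloc(foo_cachep, gfp);       /* one object per call */
}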
bootmem.c
65 * bootmem_bootmap_pages - calculate bitmap size in pages
151 * @size: size of the range in bytes
157 void __init free_bootmem_late(unsigned long physaddr, unsigned long size) argument
161 kmemleak_free_part(__va(physaddr), size); local
164 end = PFN_DOWN(physaddr + size);
386 * @size: size of the range in bytes
393 unsigned long size)
397 kmemleak_free_part(__va(physaddr), size); local
392 free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size) argument
414 free_bootmem(unsigned long physaddr, unsigned long size) argument
418 kmemleak_free_part(__va(physaddr), size); local
437 reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size, int flags) argument
458 reserve_bootmem(unsigned long addr, unsigned long size, int flags) argument
492 alloc_bootmem_bdata(struct bootmem_data *bdata, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
599 alloc_bootmem_core(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
624 ___alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
656 __alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal) argument
664 ___alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
692 __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal) argument
700 ___alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
730 __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) argument
739 ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit) argument
769 __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) argument
778 __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) argument
824 __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal) argument
830 __alloc_bootmem_low_nopanic(unsigned long size, unsigned long align, unsigned long goal) argument
853 __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) argument
[all...]
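Note on the bootmem.c matches above: bootmem_bootmap_pages() (line 65) sizes the allocator's bitmap, which tracks one bit per page. A minimal standalone restatement of that calculation (4 KiB pages assumed; the real function also aligns the intermediate byte count):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pages needed for a bitmap with one bit per page of managed memory. */
static unsigned long bootmap_pages(unsigned long pages)
{
        unsigned long bytes = DIV_ROUND_UP(pages, 8);

        return DIV_ROUND_UP(bytes, PAGE_SIZE);
}

int main(void)
{
        /* 1 GiB of 4 KiB pages = 262144 pages -> 32 KiB bitmap -> 8 pages */
        printf("%lu\n", bootmap_pages(262144));
        return 0;
}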
slob.c
28 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
39 * alignment. Again, objects of page-size or greater are allocated by
40 * calling alloc_pages(). As SLAB objects know their size, no separate
41 * size bookkeeping is necessary and there is essentially no allocation
77 * slob_block has a field 'units', which indicates size of block if +ve,
80 * Free blocks of size 1 unit simply contain the offset of the next block.
81 * Those with larger size contain their size in the first SLOB_UNIT of
125 #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNI
134 int size; member in struct:slob_rcu
145 set_slob(slob_t *s, slobidx_t size, slob_t *next) argument
217 slob_page_alloc(struct page *sp, size_t size, int align) argument
268 slob_alloc(size_t size, gfp_t gfp, int align, int node) argument
340 slob_free(void *block, int size) argument
427 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) argument
465 __kmalloc(size_t size, gfp_t gfp) argument
471 __kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) argument
477 __kmalloc_node_track_caller(size_t size, gfp_t gfp, int node, unsigned long caller) argument
570 __kmalloc_node(size_t size, gfp_t gfp, int node) argument
583 __kmem_cache_free(void *b, int size) argument
[all...]
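Note on the slob.c matches above: lines 77-81 describe free-list bookkeeping in SLOB "units", and line 125 shows the SLOB_UNITS() rounding macro (truncated by the search tool). A minimal standalone restatement; SLOB_UNIT is assumed to be 2 bytes (the size of a 16-bit slobidx_t block header), which is configuration dependent:

#include <stdio.h>

#define SLOB_UNIT 2                             /* assumed: sizeof(slob_t) */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

int main(void)
{
        /* a 13-byte object occupies ceil(13 / 2) = 7 units; kmalloc()ed
         * objects additionally carry the 4-byte size header mentioned at
         * line 28 of the excerpt */
        printf("SLOB_UNITS(13) = %lu\n", (unsigned long)SLOB_UNITS(13));
        return 0;
}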
readahead.c
246 * Set the initial window size, round to next power of 2 and square
247 * for small size, x 4 for medium, and x 2 for large
251 static unsigned long get_init_ra_size(unsigned long size, unsigned long max) argument
253 unsigned long newsize = roundup_pow_of_two(size);
266 * Get the previous window size, ramp it up, and
267 * return it as the new window size.
272 unsigned long cur = ra->size;
290 * |------------------- size -------------------->|
299 * will be equal to size, for maximum pipelining.
303 * page at (start+size
349 pgoff_t size; local
[all...]
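Note on the readahead.c matches above: get_init_ra_size() (line 251) picks the first readahead window. A minimal standalone restatement of the heuristic: round the request up to a power of two, grow it by 4x if it is tiny relative to the maximum window, by 2x if it is moderate, and clamp it otherwise (thresholds follow the kernel code of this era; all sizes are in pages):

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

static unsigned long init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize *= 4;           /* tiny request: grow aggressively */
        else if (newsize <= max / 4)
                newsize *= 2;           /* moderate request: grow gently */
        else
                newsize = max;          /* large request: clamp to the max */
        return newsize;
}

int main(void)
{
        printf("%lu\n", init_ra_size(4, 128));   /* 4 pages   -> 16 pages  */
        printf("%lu\n", init_ra_size(32, 128));  /* 32 pages  -> 64 pages  */
        printf("%lu\n", init_ra_size(100, 128)); /* 100 pages -> 128 pages */
        return 0;
}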
sparse-vmemmap.c
13 * for free if we use the same page size as the 1-1 mappings. In that
39 unsigned long size,
43 return memblock_virt_alloc_try_nid(size, align, goal,
50 void * __meminit vmemmap_alloc_block(unsigned long size, int node) argument
59 get_order(size));
63 get_order(size));
68 return __earlyonly_bootmem_alloc(node, size, size,
72 /* need to make sure size is all the same during early stage */
73 void * __meminit vmemmap_alloc_block_buf(unsigned long size, in argument
38 __earlyonly_bootmem_alloc(int node, unsigned long size, unsigned long align, unsigned long goal) argument
201 unsigned long size = sizeof(struct page) * PAGES_PER_SECTION; local
[all...]
dmapool.c
12 * This allocator returns small blocks of a given size which are DMA-able by
14 * new pages, then splits them up into blocks of the required size.
20 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
48 size_t size; member in struct:dma_pool
71 unsigned size; local
77 size = PAGE_SIZE;
79 temp = scnprintf(next, size, "poolinfo - 0.1\n");
80 size -= temp;
96 temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
98 pages * (pool->allocation / pool->size),
131 dma_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t boundary) argument
498 dmam_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t allocation) argument
[all...]
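Note on the dmapool.c matches above: the pool hands out fixed-size, DMA-able blocks carved from pages it allocates for one device. A hedged usage sketch for the dma_pool_create() signature at line 131 (kernel context assumed; the 64-byte descriptor size and calling driver are illustrative):

static int run_one_descriptor(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *desc;

        /* every block handed out is 64 bytes, 64-byte aligned, no boundary */
        pool = dma_pool_create("demo_descs", dev, 64, 64, 0);
        if (!pool)
                return -ENOMEM;

        desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);  /* CPU + bus address */
        if (!desc) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }

        /* ... point the hardware at @dma, wait for completion ... */

        dma_pool_free(pool, desc, dma);
        dma_pool_destroy(pool);
        return 0;
}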
fremap.c
103 unsigned long size, pgoff_t pgoff)
113 size -= PAGE_SIZE;
116 } while (size);
125 * @size: size of the remapped virtual memory range
142 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
162 size = size & PAGE_MASK;
165 if (start + size <= start)
169 if (pgoff + (size >> PAGE_SHIF
102 generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr, unsigned long size, pgoff_t pgoff) argument
[all...]
cma.c
149 * @size: Size of the reserved area (in bytes),
155 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, argument
167 if (!size || !memblock_is_region_reserved(base, size))
177 if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
186 cma->count = size >> PAGE_SHIFT;
197 * @size: Size of the reserved area (in bytes),
213 phys_addr_t size, phys_addr_t limit,
221 pr_debug("%s(size
212 cma_declare_contiguous(phys_addr_t base, phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, bool fixed, struct cma **res_cma) argument
[all...]
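Note on the cma.c matches above: line 177 rejects a reservation unless both its base and its size are already multiples of the CMA alignment. A minimal standalone restatement of that test (a 4 MiB alignment is assumed purely for the example):

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t alignment = 4ULL << 20;        /* assumed 4 MiB alignment */
        uint64_t base = 0x10000000;             /* 256 MiB: aligned */
        uint64_t size = 0x00800000;             /* 8 MiB: aligned */

        if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                printf("rejected: unaligned reservation\n");
        else
                printf("accepted\n");
        return 0;
}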
kmemcheck.c
61 size_t size)
83 kmemcheck_mark_initialized(object, size);
89 kmemcheck_mark_uninitialized(object, size);
93 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) argument
97 kmemcheck_mark_freed(object, size);
60 kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, size_t size) argument
vmalloc.c
341 * Allocate a region of KVA of the specified size and alignment, within the
344 static struct vmap_area *alloc_vmap_area(unsigned long size, argument
355 BUG_ON(!size);
356 BUG_ON(size & ~PAGE_MASK);
375 * the vmap_area cached in free_vmap_cache: if size fits
382 size < cached_hole_size ||
399 if (addr + size < addr)
404 if (addr + size < addr)
427 while (addr + size > first->va_start && addr + size <
905 vb_alloc(unsigned long size, gfp_t gfp_mask) argument
963 vb_free(const void *addr, unsigned long size) argument
1063 unsigned long size = count << PAGE_SHIFT; local
1098 unsigned long size = count << PAGE_SHIFT; local
1229 map_kernel_range_noflush(unsigned long addr, unsigned long size, pgprot_t prot, struct page **pages) argument
1249 unmap_kernel_range_noflush(unsigned long addr, unsigned long size) argument
1263 unmap_kernel_range(unsigned long addr, unsigned long size) argument
1309 __get_vm_area_node(unsigned long size, unsigned long align, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, const void *caller) argument
1344 __get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end) argument
1352 __get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, const void *caller) argument
1369 get_vm_area(unsigned long size, unsigned long flags) argument
1376 get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller) argument
1632 __vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, int node, const void *caller) argument
1689 __vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, pgprot_t prot, int node, const void *caller) argument
1697 __vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) argument
1704 __vmalloc_node_flags(unsigned long size, int node, gfp_t flags) argument
1720 vmalloc(unsigned long size) argument
1737 vzalloc(unsigned long size) argument
1751 vmalloc_user(unsigned long size) argument
1779 vmalloc_node(unsigned long size, int node) argument
1798 vzalloc_node(unsigned long size, int node) argument
1821 vmalloc_exec(unsigned long size) argument
1842 vmalloc_32(unsigned long size) argument
1856 vmalloc_32_user(unsigned long size) argument
2125 remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, unsigned long size) argument
2221 alloc_vm_area(size_t size, pte_t **ptes) argument
[all...]
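Note on the vmalloc.c matches above: alloc_vmap_area() (line 344) insists on a non-zero, page-aligned size; the vmalloc()/vzalloc() wrappers listed further down do that rounding for the caller. A hedged usage sketch (kernel context assumed; the table is illustrative):

static unsigned int *make_big_table(size_t nentries)
{
        /* virtually contiguous, zeroed, rounded up to whole pages; may sleep */
        unsigned int *tbl = vzalloc(nentries * sizeof(*tbl));

        if (!tbl)
                return NULL;

        tbl[0] = 1;                     /* ... fill in the table ... */
        return tbl;                     /* caller releases it with vfree() */
}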
kmemleak.c
112 #define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
125 size_t size; member in struct:kmemleak_scan_area
149 size_t size; member in struct:kmemleak_object
252 size_t size; /* memory block size */ member in struct:early_log
299 min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
357 seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
358 object->pointer, object->size);
383 pr_notice("Object 0x%08lx (size %zu):\n",
384 object->pointer, object->size);
516 create_object(unsigned long ptr, size_t size, int min_count, gfp_t gfp) argument
649 delete_object_part(unsigned long ptr, size_t size) argument
737 add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) argument
803 log_early(int op_type, const void *ptr, size_t size, int min_count) argument
892 kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) argument
913 kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) argument
959 kmemleak_free_part(const void *ptr, size_t size) argument
1075 kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) argument
1650 kmemleak_write(struct file *file, const char __user *user_buf, size_t size, loff_t *ppos) argument
[all...]
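Note on the kmemleak.c matches above: allocators report their objects to the leak scanner through the hooks listed here; the nobootmem.c and memblock.c excerpts earlier show exactly these calls. A hedged sketch of the pattern (my_carve_block() and my_release_block() are hypothetical allocator internals):

void *my_alloc(size_t size, gfp_t gfp)
{
        void *obj = my_carve_block(size);       /* hypothetical allocator */

        if (obj)                                /* track it; expect >= 1 reference */
                kmemleak_alloc(obj, size, 1, gfp);
        return obj;
}

void my_free(void *obj, size_t size)
{
        kmemleak_free_part(obj, size);          /* stop tracking the range */
        my_release_block(obj, size);            /* hypothetical allocator */
}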
zsmalloc.c
20 * any object of size PAGE_SIZE/2 or larger would occupy an entire page.
29 * For simplicity, zsmalloc can only allocate objects of size up to PAGE_SIZE
32 * uncompressed form). For allocation requests larger than this size, failure
64 * zspage (class->zspage_order * PAGE_SIZE / class->size)
145 * On systems with 4K page size, this gives 255 size classes! There is a
147 * - Large number of size classes is potentially wasteful as free page are
149 * - Small number of size classes causes large internal fragmentation
150 * - Probably its better to use specific size classes (empirically
194 int size; member in struct:size_class
256 zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle) argument
361 get_size_class_index(int size) argument
761 __zs_map_object(struct mapping_area *area, struct page *pages[2], int off, int size) argument
769 __zs_unmap_object(struct mapping_area *area, struct page *pages[2], int off, int size) argument
800 __zs_map_object(struct mapping_area *area, struct page *pages[2], int off, int size) argument
828 __zs_unmap_object(struct mapping_area *area, struct page *pages[2], int off, int size) argument
949 int size; local
998 zs_malloc(struct zs_pool *pool, size_t size) argument
[all...]
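Note on the zsmalloc.c matches above: get_size_class_index() (line 361) maps a request to one of the size classes described at lines 145-150. A minimal standalone restatement of that maths (4 KiB pages assumed: 16-byte class granularity, 32-byte minimum object, PAGE_SIZE maximum, giving the 255 classes the comment mentions):

#include <stdio.h>

#define PAGE_SIZE               4096
#define ZS_MIN_ALLOC_SIZE       32
#define ZS_MAX_ALLOC_SIZE       PAGE_SIZE
#define ZS_SIZE_CLASS_DELTA     (PAGE_SIZE >> 8)        /* 16 bytes */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static int size_class_index(int size)
{
        if (size <= ZS_MIN_ALLOC_SIZE)
                return 0;
        return DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA);
}

int main(void)
{
        int nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) /
                 ZS_SIZE_CLASS_DELTA + 1;

        printf("size classes: %d\n", nr);                     /* 255 */
        printf("index(100)  : %d\n", size_class_index(100));  /* 5 */
        return 0;
}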
sparse.c
265 unsigned long size)
284 p = memblock_virt_alloc_try_nid_nopanic(size,
335 unsigned long size)
337 return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
353 int size = usemap_size(); local
356 size * usemap_count);
366 usemap += size;
375 unsigned long size; local
381 size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
382 map = memblock_virt_alloc_try_nid(size,
264 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) argument
334 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) argument
394 unsigned long size = sizeof(struct page) * PAGES_PER_SECTION; local
529 int size; local
593 memblock_free_early(__pa(usemap_map), size); local
[all...]
failslab.c
14 bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags) argument
25 return should_fail(&failslab.attr, size);
percpu.c
23 * Allocation is done in offset-size areas of single unit space. Ie,
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is
107 int contig_hint; /* max contiguous size hint */
205 static int __pcpu_size_to_slot(int size) argument
207 int highbit = fls(size); /* size is in bytes */
211 static int pcpu_size_to_slot(int size) argument
213 if (size == pcpu_unit_size)
215 return __pcpu_size_to_slot(size);
294 pcpu_mem_zalloc(size_t size) argument
312 pcpu_mem_free(void *ptr, size_t size) argument
504 pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size, int size, int align, bool pop_only) argument
556 pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align, bool pop_only, int *occ_pages_p) argument
873 pcpu_alloc(size_t size, size_t align, bool reserved, gfp_t gfp) argument
1067 __alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) argument
1080 __alloc_percpu(size_t size, size_t align) argument
1102 __alloc_reserved_percpu(size_t size, size_t align) argument
2184 pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, size_t align) argument
2191 pcpu_dfl_fc_free(void *ptr, size_t size) argument
2193 memblock_free_early(__pa(ptr), size); local
2273 const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); local
[all...]
slab.h
20 unsigned int object_size;/* The original size of the object */
21 unsigned int size; /* The aligned/padded/added on size */ member in struct:kmem_cache
53 PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */
70 unsigned long align, unsigned long size);
76 /* Find the kmalloc slab corresponding for a certain size */
84 extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
87 size_t size, unsigned long flags);
92 struct kmem_cache *find_mergeable(size_t size, size_t align,
96 __kmem_cache_alias(const char *name, size_t size, size_
104 __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) argument
[all...]
slub.c
236 if (object < base || object >= base + page->objects * s->size ||
237 (object - base) % s->size) {
273 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
274 __p += (__s)->size)
278 __p += (__s)->size, __idx++)
283 return (p - addr) / s->size;
307 return s->size;
310 static inline int order_objects(int order, unsigned long size, int reserved) argument
312 return ((PAGE_SIZE << order) - reserved) / size;
316 unsigned long size, in
315 oo_make(int order, unsigned long size, int reserved) argument
1226 kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) argument
2471 kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) argument
2493 kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) argument
2736 slab_order(int size, int min_objects, int max_order, int fract_leftover, int reserved) argument
2765 calculate_order(int size, int reserved) argument
2946 unsigned long size = s->object_size; local
3239 __kmalloc(size_t size, gfp_t flags) argument
3261 kmalloc_large_node(size_t size, gfp_t flags, int node) argument
3275 __kmalloc_node(size_t size, gfp_t flags, int node) argument
3617 __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) argument
3711 __kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) argument
3733 __kmalloc_node_track_caller(size_t size, gfp_t gfpflags, int node, unsigned long caller) argument
[all...]
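Note on the slub.c matches above: order_objects() (line 310) is the sizing rule for a slab: how many objects of a given size fit in a 2^order-page slab after setting aside a few reserved bytes. A minimal standalone restatement with worked numbers (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long order_objects(int order, unsigned long size,
                                   unsigned long reserved)
{
        return ((PAGE_SIZE << order) - reserved) / size;
}

int main(void)
{
        printf("%lu\n", order_objects(0, 224, 0));      /* 4 KiB slab:  18 objects */
        printf("%lu\n", order_objects(3, 224, 0));      /* 32 KiB slab: 146 objects */
        return 0;
}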
slab.c
350 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
351 * cachep->size - 1* BYTES_PER_WORD: last caller address
370 return (unsigned long long *)(objp + cachep->size -
373 return (unsigned long long *) (objp + cachep->size -
380 return (void **)(objp + cachep->size - BYTES_PER_WORD);
443 return page->s_mem + cache->size * idx;
447 * We want to avoid an expensive divide : (offset / cache->size)
448 * Using the fact that size is a constant for a particular cache,
449 * we can replace (offset / cache->size) by
464 .size
1662 int size = cachep->object_size; local
1694 int size = cachep->object_size; local
1737 int i, size; local
1765 int size, i; local
1923 calculate_slab_order(struct kmem_cache *cachep, size_t size, size_t align, unsigned long flags) argument
1991 size_t size; local
2055 __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) argument
2101 size_t size = cachep->size; local
3401 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) argument
3439 kmem_cache_alloc_node_trace(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t size) argument
3457 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) argument
3467 __kmalloc_node(size_t size, gfp_t flags, int node) argument
3473 __kmalloc_node_track_caller(size_t size, gfp_t flags, int node, unsigned long caller) argument
3487 __do_kmalloc(size_t size, gfp_t flags, unsigned long caller) argument
3504 __kmalloc(size_t size, gfp_t flags) argument
3510 __kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) argument
4102 unsigned long offset, size; local
[all...]
zbud.c
30 * zbud pages are divided into "chunks". The size of the chunks is fixed at
63 * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
80 * the lists each zbud page is added to depends on the size of
107 * @first_chunks: the size of the first buddy in chunks, 0 if free
108 * @last_chunks: the size of the last buddy in chunks, 0 if free
143 static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, argument
146 return zbud_alloc(pool, size, gfp, handle);
212 /* Converts an allocation size in bytes to size in zbud chunks */
213 static int size_to_chunks(size_t size) argument
337 zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, unsigned long *handle) argument
[all...]
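Note on the zbud.c matches above: lines 30 and 63 fix the allocation granularity at PAGE_SIZE/64, and size_to_chunks() (line 213) rounds a request up to whole chunks. A minimal standalone restatement (4 KiB pages assumed, so one chunk is 64 bytes):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT      12
#define NCHUNKS_ORDER   6                       /* 2^6 = 64 chunks per page */
#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1UL << CHUNK_SHIFT)    /* 64 bytes */

static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

int main(void)
{
        printf("chunk size: %lu bytes\n", CHUNK_SIZE);
        printf("1000 bytes -> %d chunks\n", size_to_chunks(1000));  /* 16 */
        return 0;
}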
iov_iter.c
411 size_t size = i->count; local
414 if (!size)
419 if (n >= size)
420 return res | size;
421 size -= n;
423 while (size > (++iov)->iov_len) {
425 size -= iov->iov_len;
427 res |= (unsigned long)iov->iov_base | size;
514 size_t size = i->count; local
519 for (n = 0; size
756 size_t size = i->count; local
818 size_t size = i->count; local
897 iov_iter_advance(struct iov_iter *i, size_t size) argument
[all...]
memory_hotplug.c
130 static struct resource *register_memory_resource(u64 start, u64 size) argument
138 res->end = start + size - 1;
749 * @nr_pages: number of pages to remove (must be multiple of section size)
761 resource_size_t start, size; local
771 size = nr_pages * PAGE_SIZE;
772 ret = release_mem_region_adjustable(&iomem_resource, start, size);
774 resource_size_t endres = start + size - 1;
1171 static int check_hotplug_memory_range(u64 start, u64 size) argument
1174 u64 nr_pages = size >> PAGE_SHIFT;
1179 pr_err("Section-unaligned hotplug range: start 0x%llx, size
1193 should_add_memory_movable(int nid, u64 start, u64 size) argument
1208 zone_for_memory(int nid, u64 start, u64 size, int zone_default) argument
1217 add_memory(int nid, u64 start, u64 size) argument
1999 remove_memory(int nid, u64 start, u64 size) argument
[all...]
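Note on the memory_hotplug.c matches above: check_hotplug_memory_range() (line 1171) only accepts a range whose start and size are both multiples of a memory section. A minimal standalone restatement (section size is architecture dependent; 128 MiB sections and 4 KiB pages are assumed here):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT              12
#define SECTION_SIZE_BITS       27                      /* assumed: 128 MiB */
#define PAGES_PER_SECTION       (1ULL << (SECTION_SIZE_BITS - PAGE_SHIFT))

static int check_hotplug_range(uint64_t start, uint64_t size)
{
        uint64_t start_pfn = start >> PAGE_SHIFT;
        uint64_t nr_pages = size >> PAGE_SHIFT;

        if (!nr_pages ||
            (start_pfn % PAGES_PER_SECTION) ||
            (nr_pages % PAGES_PER_SECTION))
                return -1;              /* empty or section-unaligned */
        return 0;
}

int main(void)
{
        printf("%d\n", check_hotplug_range(0x8000000, 0x8000000));  /*  0: aligned 128 MiB */
        printf("%d\n", check_hotplug_range(0x8000000, 0x1000000));  /* -1: 16 MiB is too small */
        return 0;
}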

Completed in 219 milliseconds
