/mm/

maccess.c
     12:  * @size: size of the data chunk
     18: long __weak probe_kernel_read(void *dst, const void *src, size_t size)
     21: long __probe_kernel_read(void *dst, const void *src, size_t size)
     29:                 (__force const void __user *)src, size);
     41:  * @size: size of the data chunk
     46: long __weak probe_kernel_write(void *dst, const void *src, size_t size)
     49: long __probe_kernel_write(void *dst, const void *src, size_t size)
     56:         ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
     ...
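
The probe_kernel_read()/probe_kernel_write() pair copies through a kernel
address that may be bogus without oopsing; the matches above show the core
trick of casting the kernel pointer to a __user pointer so the inatomic
user-copy routines, whose exception fixups swallow faults, can be reused. A
minimal sketch of the read side, assuming the set_fs()/pagefault_disable()
bracketing mainline used in this era:

    #include <linux/uaccess.h>

    long __probe_kernel_read(void *dst, const void *src, size_t size)
    {
            long ret;
            mm_segment_t old_fs = get_fs();

            /* Let the user-copy fixups accept a kernel address. */
            set_fs(KERNEL_DS);
            /* A fault now yields a short copy instead of sleeping in
             * the page fault handler. */
            pagefault_disable();
            ret = __copy_from_user_inatomic(dst,
                            (__force const void __user *)src, size);
            pagefault_enable();
            set_fs(old_fs);

            return ret ? -EFAULT : 0;
    }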

failslab.c
     14: bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
     25:         return should_fail(&failslab.attr, size);

kmemcheck.c
     60: kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
     61:                      size_t size)
     83:         kmemcheck_mark_initialized(object, size);
     89:         kmemcheck_mark_uninitialized(object, size);
     93: void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
     97:         kmemcheck_mark_freed(object, size);

list_lru.c
    130:         size_t size = sizeof(*lru->node) * nr_node_ids;
    132:         lru->node = kzalloc(size, GFP_KERNEL);

early_ioremap.c
     97: __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
    116:                  __func__, (u64)phys_addr, size))
    119:         /* Don't allow wraparound or zero size */
    120:         last_addr = phys_addr + size - 1;
    121:         if (WARN_ON(!size || last_addr < phys_addr))
    124:         prev_size[slot] = size;
    130:         size = PAGE_ALIGN(last_addr + 1) - phys_addr;
    135:         nrpages = size >> PAGE_SHIFT;
    153:                  __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);
    159: void __init early_iounmap(void __iomem *addr, unsigned long size)
    208: early_ioremap(resource_size_t phys_addr, unsigned long size)
    215: early_memremap(resource_size_t phys_addr, unsigned long size)
    223: early_ioremap(resource_size_t phys_addr, unsigned long size)
    230: early_memremap(resource_size_t phys_addr, unsigned long size)
    235: early_iounmap(void __iomem *addr, unsigned long size)
    242: early_memunmap(void *addr, unsigned long size)
    ...
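
Lines 119-121 are the canonical guard against an empty or wrapping physical
range: compute the inclusive end address, then reject the request when size
is zero or the addition overflowed. A self-contained userspace rendering of
the same check (function name and test values are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Valid iff the range is non-empty and base + size - 1 did not wrap. */
    static int range_is_valid(uint64_t base, uint64_t size)
    {
            uint64_t last_addr = base + size - 1;   /* inclusive end */

            return size != 0 && last_addr >= base;  /* wrap makes last < base */
    }

    int main(void)
    {
            printf("%d\n", range_is_valid(0xfff0, 0x10));                 /* 1 */
            printf("%d\n", range_is_valid(0xfffffffffffffff0ULL, 0x100)); /* 0 */
            printf("%d\n", range_is_valid(0x1000, 0));                    /* 0 */
            return 0;
    }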

fremap.c
    102: generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
    103:                          unsigned long size, pgoff_t pgoff)
    113:                 size -= PAGE_SIZE;
    116:         } while (size);
    125:  * @size: size of the remapped virtual memory range
    142: SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
    162:         size = size & PAGE_MASK;
    165:         if (start + size <= start)
    169:         if (pgoff + (size >> PAGE_SHIFT ...
    ...

mempool.c
     58:  * this function creates and allocates a guaranteed size, preallocated
    120:  * size immediately, but new mempool_free() calls will refill it.
    352:         size_t size = (size_t)pool_data;
    353:         return kmalloc(size, gfp_mask);
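
The last two matches are the body of the kmalloc-backed allocation helper:
the element size rides in the opaque pool_data pointer that mempool_create()
stores and later hands back to the alloc/free callbacks. A hedged sketch of
creating such a pool (mempool_kmalloc()/mempool_kfree() are the stock kernel
helpers this code belongs to; struct foo and the reserve count are invented):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct foo { int x; };          /* hypothetical element type */

    static mempool_t *foo_pool;

    static int foo_pool_init(void)
    {
            /* Keep at least 16 elements in reserve; pool_data carries the
             * allocation size, exactly as lines 352-353 read it back out. */
            foo_pool = mempool_create(16, mempool_kmalloc, mempool_kfree,
                                      (void *)sizeof(struct foo));
            return foo_pool ? 0 : -ENOMEM;
    }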

sparse-vmemmap.c
     13:  * for free if we use the same page size as the 1-1 mappings. In that
     38: __earlyonly_bootmem_alloc(int node, unsigned long size,
     39:                           unsigned long align, unsigned long goal)
     43:         return memblock_virt_alloc_try_nid(size, align, goal,
     50: void * __meminit vmemmap_alloc_block(unsigned long size, int node)
     59:                         get_order(size));
     63:                         get_order(size));
     68:         return __earlyonly_bootmem_alloc(node, size, size,
     72: /* need to make sure size is all the same during early stage */
     73: void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
    201:         unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
    ...

zpool.c
    231:  * @size The amount of memory to allocate.
    244: int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
    247:         return zpool->driver->malloc(zpool->pool, size, gfp, handle);
    270:  * zpool_shrink() - Shrink the pool size
    275:  * This attempts to shrink the actual memory size of the pool
    336:  * zpool_get_total_size() - The total size of the pool
    339:  * This returns the total size in bytes of the pool.
    341:  * Returns: Total size of the zpool in bytes.
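
zpool is a thin front end over interchangeable compressed-memory allocators
(zbud, zsmalloc), and the zpool_malloc() match shows the whole mechanism: a
one-line forward through a driver ops table. A standalone sketch of that
dispatch pattern (all names here are invented for illustration, not the
kernel's zpool types):

    #include <stddef.h>
    #include <stdio.h>

    /* Each backend supplies its own ops table. */
    struct pool_driver {
            const char *type;
            int (*malloc)(void *pool, size_t size, unsigned long *handle);
    };

    struct pool {
            const struct pool_driver *driver;
            void *impl;                     /* backend-private state */
    };

    /* The front end only forwards, like zpool_malloc() above. */
    static int pool_malloc(struct pool *p, size_t size, unsigned long *handle)
    {
            return p->driver->malloc(p->impl, size, handle);
    }

    /* A trivial backend standing in for zbud or zsmalloc. */
    static int dummy_malloc(void *pool, size_t size, unsigned long *handle)
    {
            (void)pool;
            *handle = size;                 /* not a real allocation */
            return 0;
    }

    static const struct pool_driver dummy = { "dummy", dummy_malloc };

    int main(void)
    {
            struct pool p = { &dummy, NULL };
            unsigned long h;

            if (pool_malloc(&p, 64, &h) == 0)
                    printf("handle = %lu\n", h);
            return 0;
    }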

cma.c
    149:  * @size: Size of the reserved area (in bytes),
    155: int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
    167:         if (!size || !memblock_is_region_reserved(base, size))
    177:         if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
    186:         cma->count = size >> PAGE_SHIFT;
    197:  * @size: Size of the reserved area (in bytes),
    212: cma_declare_contiguous(phys_addr_t base, phys_addr_t size,
    213:                        phys_addr_t limit, phys_addr_t alignment,
                               unsigned int order_per_bit, bool fixed,
                               struct cma **res_cma)
    221:         pr_debug("%s(size ...
    ...
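
The check at line 177 uses the kernel's round-up macro to test alignment: a
value is aligned exactly when ALIGN() leaves it unchanged. A standalone
rendering of that test (ALIGN is reproduced here as the usual power-of-two
round-up; the sample values are invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN(x, a) (((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

    int main(void)
    {
            uint64_t base = 0x10000000, size = 0x00400000;
            uint64_t alignment = 1u << 22;          /* 4 MiB */

            /* Aligned values are fixed points of ALIGN(); CMA rejects the
             * region when either test fails. */
            if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                    printf("rejected: not 4 MiB aligned\n");
            else
                    printf("accepted\n");
            return 0;
    }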

dmapool.c
     12:  * This allocator returns small blocks of a given size which are DMA-able by
     14:  * new pages, then splits them up into blocks of the required size.
     20:  * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
     48:         size_t size;                    /* member of struct dma_pool */
     71:         unsigned size;
     77:         size = PAGE_SIZE;
     79:         temp = scnprintf(next, size, "poolinfo - 0.1\n");
     80:         size -= temp;
     96:         temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
     98:                          pages * (pool->allocation / pool->size),
    131: dma_pool_create(const char *name, struct device *dev, size_t size,
                        size_t align, size_t boundary)
    498: dmam_pool_create(const char *name, struct device *dev, size_t size,
                         size_t align, size_t allocation)
    ...
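
The comment block at the top states the allocator's contract: carve
DMA-coherent pages into fixed-size blocks. A hedged sketch of typical driver
usage against the dma_pool_create() signature matched at line 131 (struct
my_desc, the probe function, and the 16/4096 align/boundary values are
invented; the calls are the standard dmapool API):

    #include <linux/dmapool.h>
    #include <linux/types.h>

    struct my_desc { u32 ctrl; u32 len; u64 addr; };  /* hypothetical */

    static struct dma_pool *desc_pool;

    static int my_probe(struct device *dev)
    {
            struct my_desc *d;
            dma_addr_t dma;

            /* Blocks of sizeof(struct my_desc), 16-byte aligned, never
             * crossing a 4096-byte boundary. */
            desc_pool = dma_pool_create("my_desc", dev,
                                        sizeof(struct my_desc), 16, 4096);
            if (!desc_pool)
                    return -ENOMEM;

            d = dma_pool_alloc(desc_pool, GFP_KERNEL, &dma);
            if (!d) {
                    dma_pool_destroy(desc_pool);
                    return -ENOMEM;
            }
            /* ... program 'dma' into the device, touch 'd' from the CPU ... */
            dma_pool_free(desc_pool, d, dma);
            dma_pool_destroy(desc_pool);
            return 0;
    }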

filemap_xip.c
    227:         pgoff_t size;
    235:         size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
    236:         if (vmf->pgoff >= size)

hugetlb_cgroup.c
    311: static char *mem_fmt(char *buf, int size, unsigned long hsize)
    314:                 snprintf(buf, size, "%luGB", hsize >> 30);
    316:                 snprintf(buf, size, "%luMB", hsize >> 20);
    318:                 snprintf(buf, size, "%luKB", hsize >> 10);
    328:         /* format the size */
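
mem_fmt() prints a huge page size using the largest unit that fits, shifting
rather than dividing. A standalone sketch of the same cascade (only the three
snprintf lines appear in the matches, so the threshold tests here are an
assumption):

    #include <stdio.h>

    /* Largest-unit-first rendering of an exact KB/MB/GB multiple. */
    static char *mem_fmt(char *buf, int size, unsigned long hsize)
    {
            if (hsize >= (1UL << 30))
                    snprintf(buf, size, "%luGB", hsize >> 30);
            else if (hsize >= (1UL << 20))
                    snprintf(buf, size, "%luMB", hsize >> 20);
            else
                    snprintf(buf, size, "%luKB", hsize >> 10);
            return buf;
    }

    int main(void)
    {
            char buf[32];

            printf("%s\n", mem_fmt(buf, sizeof(buf), 2UL << 20));  /* 2MB */
            printf("%s\n", mem_fmt(buf, sizeof(buf), 1UL << 30));  /* 1GB */
            return 0;
    }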

nobootmem.c
     35: static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
     44:         addr = memblock_find_in_range_node(size, align, goal, limit, nid);
     48:         if (memblock_reserve(addr, size))
     52:         memset(ptr, 0, size);
     57:         kmemleak_alloc(ptr, size, 0, 0);
     64:  * @size: size of the range in bytes
     70: void __init free_bootmem_late(unsigned long addr, unsigned long size)
     74:         kmemleak_free_part(__va(addr), size);
     77:         end = PFN_DOWN(addr + size);
    129:         phys_addr_t size;
    201: free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
    216: free_bootmem(unsigned long addr, unsigned long size)
    221: ___alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    259: __alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal)
    267: ___alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    295: __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
    303: ___alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    330: __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
    339: ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    369: __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
    378: __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
    401: __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal)
    407: __alloc_bootmem_low_nopanic(unsigned long size, unsigned long align, unsigned long goal)
    430: __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
    ...

readahead.c
    246:  * Set the initial window size, round to next power of 2 and square
    247:  * for small size, x 4 for medium, and x 2 for large
    251: static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
    253:         unsigned long newsize = roundup_pow_of_two(size);
    266:  * Get the previous window size, ramp it up, and
    267:  * return it as the new window size.
    272:         unsigned long cur = ra->size;
    290:  *        |------------------- size -------------------->|
    299:  * will be equal to size, for maximum pipelining.
    303:  * page at (start+size ...
    349:         pgoff_t size;
    ...
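
The comment at 246-247 describes the initial-window ramp-up. A standalone
sketch of one plausible reading (the max/32 and max/4 thresholds follow the
mainline implementation of this era and are an assumption here;
roundup_pow_of_two is open-coded rather than the kernel's bit trick):

    #include <stdio.h>

    /* Smallest power of two >= size (size > 0). */
    static unsigned long roundup_pow_of_two(unsigned long size)
    {
            unsigned long n = 1;

            while (n < size)
                    n <<= 1;
            return n;
    }

    /* Initial readahead window: small requests get 4x, medium 2x,
     * large ones are clamped to the maximum window. */
    static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
    {
            unsigned long newsize = roundup_pow_of_two(size);

            if (newsize <= max / 32)
                    newsize = newsize * 4;
            else if (newsize <= max / 4)
                    newsize = newsize * 2;
            else
                    newsize = max;
            return newsize;
    }

    int main(void)
    {
            printf("%lu\n", get_init_ra_size(3, 256));   /* 16: small  */
            printf("%lu\n", get_init_ra_size(40, 256));  /* 128: medium */
            printf("%lu\n", get_init_ra_size(200, 256)); /* 256: clamped */
            return 0;
    }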

iov_iter.c
    411:         size_t size = i->count;
    414:         if (!size)
    419:         if (n >= size)
    420:                 return res | size;
    421:         size -= n;
    423:         while (size > (++iov)->iov_len) {
    425:                 size -= iov->iov_len;
    427:         res |= (unsigned long)iov->iov_base | size;
    514:         size_t size = i->count;
    519:         for (n = 0; size ...
    756:         size_t size = i->count;
    818:         size_t size = i->count;
    897: iov_iter_advance(struct iov_iter *i, size_t size)
    ...
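
The matches around 411-427 come from the iterator's alignment computation,
which ORs every segment's base address and length into one accumulator so
any misaligned contribution shows up as a low set bit in the result. A
standalone sketch of that trick (struct iovec is the standard <sys/uio.h>
type; the test vectors are invented):

    #include <stdio.h>
    #include <sys/uio.h>

    /* OR together every segment's base and length: a bit is set in the
     * result wherever *any* segment breaks that alignment, so one mask
     * test at the end checks all segments at once. */
    static unsigned long iovec_alignment(const struct iovec *iov, int n)
    {
            unsigned long res = 0;
            int k;

            for (k = 0; k < n; k++)
                    res |= (unsigned long)iov[k].iov_base | iov[k].iov_len;
            return res;
    }

    int main(void)
    {
            static char buf[1024] __attribute__((aligned(512)));
            struct iovec v[2] = {
                    { buf,       512 },
                    { buf + 512, 512 },
            };

            /* Low 9 bits all clear => every segment is 512-byte aligned. */
            printf("512-aligned: %s\n",
                   (iovec_alignment(v, 2) & 511) == 0 ? "yes" : "no");
            return 0;
    }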

mlock.c
    159:  * returns the size of the page as a page mask (0 for normal page,
    846: int user_shm_lock(size_t size, struct user_struct *user)
    851:         locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    868: void user_shm_unlock(size_t size, struct user_struct *user)
    871:         user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
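
Lines 851 and 871 both use the round-up-to-pages idiom, so a partial
trailing page still costs a whole page against the locked-memory limit. A
tiny standalone rendering (PAGE_SHIFT fixed at 12 here for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Bytes -> pages, rounding up, as user_shm_lock() does above. */
    static unsigned long bytes_to_pages(unsigned long size)
    {
            return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
            printf("%lu\n", bytes_to_pages(1));     /* 1 */
            printf("%lu\n", bytes_to_pages(4096));  /* 1 */
            printf("%lu\n", bytes_to_pages(4097));  /* 2 */
            return 0;
    }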

page_cgroup.c
    109: static void *__meminit alloc_page_cgroup(size_t size, int nid)
    114:         addr = alloc_pages_exact_nid(nid, size, flags);
    116:                 kmemleak_alloc(addr, size, 1, flags);
    121:                 addr = vzalloc_node(size, nid);
    123:                 addr = vzalloc(size);

slab.h
     20:         unsigned int object_size;       /* The original size of the object */
     21:         unsigned int size;              /* The aligned/padded/added on size */
     53:         PARTIAL_NODE,   /* SLAB: kmalloc size for node struct available */
     70:                         unsigned long align, unsigned long size);
     76: /* Find the kmalloc slab corresponding for a certain size */
     84: extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
     87:                         size_t size, unsigned long flags);
     92: struct kmem_cache *find_mergeable(size_t size, size_t align,
     96: __kmem_cache_alias(const char *name, size_t size, size_t ...
    104: __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *))
    ...

slob.c
     28:  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
     39:  * alignment. Again, objects of page-size or greater are allocated by
     40:  * calling alloc_pages(). As SLAB objects know their size, no separate
     41:  * size bookkeeping is necessary and there is essentially no allocation
     77:  * slob_block has a field 'units', which indicates size of block if +ve,
     80:  * Free blocks of size 1 unit simply contain the offset of the next block.
     81:  * Those with larger size contain their size in the first SLOB_UNIT of
    125: #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
    134:         int size;                       /* member of struct slob_rcu */
    145: set_slob(slob_t *s, slobidx_t size, slob_t *next)
    217: slob_page_alloc(struct page *sp, size_t size, int align)
    268: slob_alloc(size_t size, gfp_t gfp, int align, int node)
    340: slob_free(void *block, int size)
    427: __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
    465: __kmalloc(size_t size, gfp_t gfp)
    471: __kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
    477: __kmalloc_node_track_caller(size_t size, gfp_t gfp, int node, unsigned long caller)
    570: __kmalloc_node(size_t size, gfp_t gfp, int node)
    583: __kmem_cache_free(void *b, int size)
    ...
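
Lines 77-81 describe SLOB's free-list encoding: a positive units value is
the block size, with the next-free offset stored in the following unit,
while a one-unit block has no room for both and stores the offset negated.
A standalone sketch of that encoding (types, page size, and the demo page
are simplified inventions; set_slob mirrors the helper matched at 145):

    #include <stdint.h>
    #include <stdio.h>

    typedef int16_t slobidx_t;
    typedef union { slobidx_t units; } slob_t;

    #define SLOB_PAGE_MASK (~(uintptr_t)(4096 - 1))

    /* Encode a free block at s of 'size' units whose successor is 'next':
     * multi-unit blocks keep [size, next-offset]; one-unit blocks store
     * the offset negated, the sign doubling as the size-1 flag. */
    static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
    {
            slob_t *base = (slob_t *)((uintptr_t)s & SLOB_PAGE_MASK);
            slobidx_t offset = next - base;

            if (size > 1) {
                    s[0].units = size;
                    s[1].units = offset;
            } else
                    s[0].units = -offset;
    }

    static slobidx_t slob_units(slob_t *s)
    {
            return s->units > 0 ? s->units : 1;
    }

    int main(void)
    {
            /* A fake 4 KiB "page" of 2-byte units. */
            static slob_t page[2048] __attribute__((aligned(4096)));

            set_slob(&page[0], 8, &page[100]);   /* 8-unit free block */
            set_slob(&page[50], 1, &page[100]);  /* 1-unit free block */
            printf("units: %d and %d\n",
                   slob_units(&page[0]), slob_units(&page[50]));
            return 0;
    }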

zbud.c
     30:  * zbud pages are divided into "chunks". The size of the chunks is fixed at
     63:  * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
     80:  * the lists each zbud page is added to depends on the size of
    107:  * @first_chunks: the size of the first buddy in chunks, 0 if free
    108:  * @last_chunks:  the size of the last buddy in chunks, 0 if free
    143: static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
    146:         return zbud_alloc(pool, size, gfp, handle);
    212: /* Converts an allocation size in bytes to size in zbud chunks */
    213: static int size_to_chunks(size_t size)
    337: zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, unsigned long *handle)
    ...
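
Per the comment at line 63, the allocation granularity is PAGE_SIZE/64, so
with 4 KiB pages each chunk is 64 bytes, and size_to_chunks() is a
round-up division. A standalone sketch (the rounding form is mine; the
kernel's version shifts by a chunk-order constant to the same effect):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE   4096UL
    #define NCHUNKS     64
    #define CHUNK_SIZE  (PAGE_SIZE / NCHUNKS)   /* 64 bytes per chunk */

    /* Bytes -> zbud chunks, rounding up, per the comment matched at 212. */
    static int size_to_chunks(size_t size)
    {
            return (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
    }

    int main(void)
    {
            printf("%d\n", size_to_chunks(1));     /* 1  */
            printf("%d\n", size_to_chunks(64));    /* 1  */
            printf("%d\n", size_to_chunks(65));    /* 2  */
            printf("%d\n", size_to_chunks(2048));  /* 32 */
            return 0;
    }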

bootmem.c
     65:  * bootmem_bootmap_pages - calculate bitmap size in pages
    151:  * @size: size of the range in bytes
    157: void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
    161:         kmemleak_free_part(__va(physaddr), size);
    164:         end = PFN_DOWN(physaddr + size);
    386:  * @size: size of the range in bytes
    392: free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
    397:         kmemleak_free_part(__va(physaddr), size);
    414: free_bootmem(unsigned long physaddr, unsigned long size)
    418:         kmemleak_free_part(__va(physaddr), size);
    437: reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size, int flags)
    458: reserve_bootmem(unsigned long addr, unsigned long size, int flags)
    492: alloc_bootmem_bdata(struct bootmem_data *bdata, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    599: alloc_bootmem_core(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    624: ___alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    656: __alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal)
    664: ___alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    692: __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
    700: ___alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    730: __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
    739: ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit)
    769: __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
    778: __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
    824: __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal)
    830: __alloc_bootmem_low_nopanic(unsigned long size, unsigned long align, unsigned long goal)
    853: __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
    ...

slab_common.c
     62:  * Determine the size of a slab object
     71: static int kmem_cache_sanity_check(const char *name, size_t size)
     75:         if (!name || in_interrupt() || size < sizeof(void *) ||
     76:             size > KMALLOC_MAX_SIZE) {
     92:                 pr_err("Slab cache with size %d has lost its name\n",
    102: static inline int kmem_cache_sanity_check(const char *name, size_t size)
    112:         size_t size;
    118:         size = offsetof(struct memcg_cache_params, memcg_caches);
    119:         size += memcg_limited_groups_array_size * sizeof(void *);
    121:         size ...
    143:         int size;
    227: find_mergeable(size_t size, size_t align, unsigned long flags, const char *name, void (*ctor)(void *))
    275: calculate_alignment(unsigned long flags, unsigned long align, unsigned long size)
    299: do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align, unsigned long flags, void (*ctor)(void *), struct mem_cgroup *memcg, struct kmem_cache *root_cache)
    363: kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *))
    571: create_boot_cache(struct kmem_cache *s, const char *name, size_t size, unsigned long flags)
    588: create_kmalloc_cache(const char *name, size_t size, unsigned long flags)
    652: kmalloc_slab(size_t size, gfp_t flags)
    765:         int size = kmalloc_size(i);
    783: kmalloc_order(size_t size, gfp_t flags, unsigned int order)
    797: kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
    ...
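
kmem_cache_create(), matched at line 363, is the public entry point this
file implements; the sanity check at 71-76 is what rejects nameless,
in-interrupt, or out-of-range registrations. A hedged usage sketch (struct
foo and the cache name are invented; the calls are the standard slab API):

    #include <linux/slab.h>

    struct foo { int a; char name[16]; };   /* hypothetical object */

    static struct kmem_cache *foo_cache;

    static int foo_init(void)
    {
            /* name, object size, align (0 = default), flags, ctor */
            foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                          0, SLAB_HWCACHE_ALIGN, NULL);
            return foo_cache ? 0 : -ENOMEM;
    }

    static void foo_exercise(void)
    {
            struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

            if (f)
                    kmem_cache_free(foo_cache, f);
    }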

sparse.c
    264: sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size)
    284:         p = memblock_virt_alloc_try_nid_nopanic(size,
    334: sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size)
    337:         return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
    353:         int size = usemap_size();
    356:                 size * usemap_count);
    366:                 usemap += size;
    375:         unsigned long size;
    381:         size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
    382:         map = memblock_virt_alloc_try_nid(size,
    394:         unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
    529:         int size;
    593:         memblock_free_early(__pa(usemap_map), size);
    ...

filemap.c
    1446:  * It is going insane. Fix it by quickly scaling down the readahead size.
    1707:         loff_t size;
    1711:         size = i_size_read(inode);
    1732:         if (retval < 0 || !iov_iter_count(iter) || *ppos >= size) {
    1819:         ra->size = ra_pages;
    1879:         loff_t size;
    1882:         size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
    1883:         if (offset >= size >> PAGE_CACHE_SHIFT)
    1932:         size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
    1933:         if (unlikely(offset >= size >> PAGE_CACHE_SHIFT ...
    1997:         loff_t size;
    ...