Searched defs:base (Results 1 - 12 of 12) sorted by relevance

/mm/
cma.c
148 * @base: Base address of the reserved area
155 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, argument
167 if (!size || !memblock_is_region_reserved(base, size))
177 if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
185 cma->base_pfn = PFN_DOWN(base);
196 * @base: Base address of the reserved area; optional, use 0 for any
209 * If @fixed is true, reserve contiguous area at exactly @base. If false,
210 * reserve in range from @base to @limit.
212 int __init cma_declare_contiguous(phys_addr_t base, argument
[all...]
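
The cma.c hits show cma_init_reserved_mem() rejecting a region unless both its base and size sit on the CMA alignment, then deriving base_pfn with PFN_DOWN. A minimal user-space sketch of that alignment test, with ALIGN reimplemented as the kernel's power-of-two round-up and all values hypothetical:

    #include <stdio.h>

    /* Kernel-style round-up to a power-of-two boundary. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long base = 0x10000000, size = 0x00400000;
        unsigned long alignment = 1UL << 22;  /* hypothetical 4 MiB order */

        /* As in cma_init_reserved_mem(): both the start and the length
         * of the reserved area must sit on the alignment. */
        if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
            printf("rejected: base/size not aligned\n");
        else
            printf("accepted: base_pfn = %#lx\n", base >> 12); /* PFN_DOWN */
        return 0;
    }
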
internal.h
160 unsigned long free_pfn; /* isolate_freepages search base */
161 unsigned long migrate_pfn; /* isolate_migratepages search base */
283 * the maximally aligned gigantic page 'base'. Handle any discontiguity
286 static inline struct page *mem_map_offset(struct page *base, int offset) argument
289 return nth_page(base, offset);
290 return base + offset;
295 * page 'base'. Handle any discontiguity in the mem_map.
298 struct page *base, int offset)
301 unsigned long pfn = page_to_pfn(base) + offset;
297 mem_map_next(struct page *iter, struct page *base, int offset) argument
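
mem_map_offset() and mem_map_next() fall back to pfn arithmetic (nth_page()) once an offset may cross a mem_map section, because base + offset is only valid while the page structs are physically contiguous. A user-space analogy with a chunked array; every name here is hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK_ELEMS 4  /* tiny stand-in for MAX_ORDER_NR_PAGES */

    struct elem { long v; };

    static struct elem *chunks[2]; /* discontiguous "mem_map" chunks */

    /* Analogue of nth_page()/pfn_to_page(): global index -> pointer. */
    static struct elem *elem_at(size_t idx)
    {
        return &chunks[idx / CHUNK_ELEMS][idx % CHUNK_ELEMS];
    }

    /* Analogue of mem_map_offset(): 'base' is assumed chunk-aligned, as
     * the kernel assumes for a maximally aligned gigantic page, so plain
     * pointer arithmetic is safe only inside one chunk. */
    static struct elem *elem_offset(struct elem *base, size_t base_idx,
                                    int offset)
    {
        if (offset >= CHUNK_ELEMS)
            return elem_at(base_idx + offset); /* recompute via index */
        return base + offset;                  /* contiguous fast path */
    }

    int main(void)
    {
        chunks[0] = calloc(CHUNK_ELEMS, sizeof(struct elem));
        chunks[1] = calloc(CHUNK_ELEMS, sizeof(struct elem));
        chunks[1][1].v = 42;

        /* Index 5 lives in the second chunk; base + 5 would be wrong. */
        printf("%ld\n", elem_offset(chunks[0], 0, 5)->v);
        return 0;
    }
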
iov_iter.c
304 const struct iovec *iov, size_t base, size_t bytes)
309 char __user *buf = iov->iov_base + base;
310 int copy = min(bytes, iov->iov_len - base);
312 base = 0;
360 size_t base = i->iov_offset; local
370 copy = min(bytes, iov->iov_len - base);
374 base += copy;
375 if (iov->iov_len == base) {
378 base = 0;
382 i->iov_offset = base;
303 __iovec_copy_from_user_inatomic(char *vaddr, const struct iovec *iov, size_t base, size_t bytes) argument
700 size_t base = i->iov_offset; local
725 size_t base = i->iov_offset; local
[all...]
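
These hits all show the same (iov, base) cursor: copy min(bytes, iov_len - base) from the current segment, advance base, and when base reaches iov_len step to the next iovec with base reset to 0. A user-space sketch of that walk (it assumes the caller never asks for more bytes than the segments hold):

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Copy 'bytes' out of an iovec array, starting 'base' bytes into
     * iov[0]; mirrors the advance/reset pattern in mm/iov_iter.c. */
    static size_t copy_from_iovecs(char *dst, const struct iovec *iov,
                                   size_t base, size_t bytes)
    {
        size_t copied = 0;

        while (bytes) {
            size_t copy = bytes < iov->iov_len - base ?
                          bytes : iov->iov_len - base;

            memcpy(dst + copied, (char *)iov->iov_base + base, copy);
            copied += copy;
            bytes -= copy;
            base += copy;
            if (base == iov->iov_len) { /* segment exhausted */
                iov++;
                base = 0;
            }
        }
        return copied;
    }

    int main(void)
    {
        struct iovec iov[2] = {
            { .iov_base = "hello ", .iov_len = 6 },
            { .iov_base = "world",  .iov_len = 5 },
        };
        char out[16] = { 0 };

        copy_from_iovecs(out, iov, 0, 11);
        puts(out); /* "hello world" */
        return 0;
    }
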
page_cgroup.c
28 struct page_cgroup *base; local
30 base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
38 if (unlikely(!base))
42 return base + offset;
47 struct page_cgroup *base; local
57 base = memblock_virt_alloc_try_nid_nopanic(
60 if (!base)
62 NODE_DATA(nid)->node_page_cgroup = base;
131 struct page_cgroup *base; local
140 base
182 struct page_cgroup *base; local
[all...]
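
The lookup resolves a page's descriptor as base + offset into a flat per-node array, after first checking for a NULL base (the node's array may have failed to allocate). A user-space sketch of that lookup; the names are hypothetical stand-ins for node_page_cgroup and the node's start pfn:

    #include <stdio.h>
    #include <stdlib.h>

    struct meta { int flags; }; /* stand-in for struct page_cgroup */

    static struct meta *node_base;       /* like ...->node_page_cgroup */
    static unsigned long node_start_pfn;

    static struct meta *lookup_meta(unsigned long pfn)
    {
        if (!node_base)     /* mirrors the unlikely(!base) check */
            return NULL;
        return node_base + (pfn - node_start_pfn);
    }

    int main(void)
    {
        node_start_pfn = 0x1000;
        node_base = calloc(256, sizeof(struct meta));

        struct meta *m = lookup_meta(0x1010);
        printf("index %td\n", m - node_base); /* 16 */
        return 0;
    }
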
slob.c
147 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); local
148 slobidx_t offset = next - base;
172 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); local
179 return base+next;
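
slob encodes free-list links as unit offsets relative to the page holding them; both hits recover the page base by masking the current address with PAGE_MASK, then either compute or apply the offset. A user-space sketch of the round-trip, assuming 4 KiB pages:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    typedef uint16_t unit_t; /* stand-in for slob's unit type */

    /* Encode: offset of 'next' from the start of its page, in units. */
    static unit_t encode_next(void *s, void *next)
    {
        uintptr_t base = (uintptr_t)s & PAGE_MASK;
        return (unit_t)(((uintptr_t)next - base) / sizeof(unit_t));
    }

    /* Decode: page base of 's' plus the stored unit offset, as in
     * "return base+next" above. */
    static void *decode_next(void *s, unit_t next)
    {
        unit_t *base = (unit_t *)((uintptr_t)s & PAGE_MASK);
        return base + next;
    }

    int main(void)
    {
        void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        void *a = (char *)page + 64, *b = (char *)page + 512;

        printf("%d\n", decode_next(a, encode_next(a, b)) == b); /* 1 */
        return 0;
    }
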
bootmem.c
472 unsigned long base = bdata->node_min_pfn; local
479 return ALIGN(base + idx, step) - base;
485 unsigned long base = PFN_PHYS(bdata->node_min_pfn); local
489 return ALIGN(base + off, align) - base;
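
Both helpers align relative to the node's own start (ALIGN(base + idx, step) - base) instead of aligning the offset in isolation, which matters whenever the node base is not itself on the requested boundary. A short sketch with hypothetical values showing the difference:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long base = 0x1280; /* node start, not step-aligned */
        unsigned long idx  = 0x30;   /* candidate offset in the node */
        unsigned long step = 0x100;

        /* Naive: base + 0x100 = 0x1380, still misaligned. */
        unsigned long naive = ALIGN(idx, step);
        /* As in bootmem.c: base + 0x80 = 0x1300, aligned. */
        unsigned long fixed = ALIGN(base + idx, step) - base;

        printf("naive=%#lx fixed=%#lx\n", naive, fixed);
        return 0;
    }
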
memblock.c
73 /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
74 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) argument
76 return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
89 phys_addr_t base, phys_addr_t size)
94 phys_addr_t rgnbase = type->regions[i].base;
96 if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
270 type->regions[0].base = 0;
437 if (this->base + this->size != next->base ||
441 BUG_ON(this->base
88 memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) argument
465 memblock_insert_region(struct memblock_type *type, int idx, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
498 memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
577 memblock_add_node(phys_addr_t base, phys_addr_t size, int nid) argument
583 memblock_add(phys_addr_t base, phys_addr_t size) argument
605 memblock_isolate_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int *start_rgn, int *end_rgn) argument
665 memblock_remove_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size) argument
680 memblock_remove(phys_addr_t base, phys_addr_t size) argument
686 memblock_free(phys_addr_t base, phys_addr_t size) argument
697 memblock_reserve_region(phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
712 memblock_reserve(phys_addr_t base, phys_addr_t size) argument
727 memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) argument
753 memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) argument
1022 memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid) argument
1278 __memblock_free_early(phys_addr_t base, phys_addr_t size) argument
1296 __memblock_free_late(phys_addr_t base, phys_addr_t size) argument
1431 memblock_is_region_memory(phys_addr_t base, phys_addr_t size) argument
1453 memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) argument
1496 unsigned long long base, size; local
[all...]
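
The comment at line 73 states the invariant directly: cap *size so that base + *size cannot wrap, after which overlap checks against [base, base + size) are safe. A minimal user-space sketch of memblock_cap_size() and the half-open overlap test it protects, with uint64_t standing in for phys_addr_t:

    #include <inttypes.h>
    #include <stdio.h>

    /* Clamp *size so base + *size cannot overflow. */
    static uint64_t cap_size(uint64_t base, uint64_t *size)
    {
        uint64_t room = UINT64_MAX - base;

        if (*size > room)
            *size = room;
        return *size;
    }

    /* Half-open interval overlap, as memblock_addrs_overlap(). */
    static int addrs_overlap(uint64_t b1, uint64_t s1,
                             uint64_t b2, uint64_t s2)
    {
        return b1 < b2 + s2 && b2 < b1 + s1;
    }

    int main(void)
    {
        uint64_t base = UINT64_MAX - 0x1000, size = 0x4000;

        cap_size(base, &size); /* clamped to 0x1000 */
        printf("capped size=%#" PRIx64 "\n", size);
        printf("overlap=%d\n",
               addrs_overlap(0x1000, 0x1000, 0x1800, 0x100)); /* 1 */
        return 0;
    }
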
nommu.c
1154 void *base; local
1211 base = page_address(pages);
1213 region->vm_start = (unsigned long) base;
1230 ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
1238 memset(base + ret, 0, len - ret);
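
The nommu path reads the file straight into the new region and then zero-fills whatever the read did not cover, so no byte of the mapping is left uninitialized. A user-space sketch of the same read-then-zero pattern (the path is just an example of a short file):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 4096;
        char *base = malloc(len);
        int fd = open("/etc/hostname", O_RDONLY);

        if (!base || fd < 0)
            return 1;

        ssize_t ret = read(fd, base, len);
        if (ret < 0)
            return 1;
        /* As in nommu.c: the tail beyond what the file provided
         * becomes zeros. */
        memset(base + ret, 0, len - ret);

        printf("read %zd bytes, zeroed %zu\n", ret, len - ret);
        close(fd);
        return 0;
    }
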
percpu.c
27 * Percpu access can be done by configuring percpu base registers
108 void *base_addr; /* base address of this chunk */
1028 /* clear the areas and return address relative to base address */
1285 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); local
1289 void *start = per_cpu_ptr(base, cpu);
1324 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); local
1340 void *start = per_cpu_ptr(base, cpu);
1951 void *base = (void *)ULONG_MAX; local
1971 /* allocate, copy and determine base address */
1991 base
[all...]
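
Both dump functions convert the chunk's base_addr back to a percpu pointer and then take per_cpu_ptr(base, cpu), i.e. one shared base plus a per-cpu offset. The real allocator keeps a per-cpu offset table; this user-space sketch simplifies that to a fixed stride, and every name in it is hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS   4
    #define UNIT_SIZE 4096 /* hypothetical per-cpu unit size */

    static char *pcpu_base; /* analogue of pcpu_base_addr */

    /* Analogue of per_cpu_ptr(): base plus this cpu's unit offset. */
    static void *pcpu_ptr(size_t offset, int cpu)
    {
        return pcpu_base + (size_t)cpu * UNIT_SIZE + offset;
    }

    int main(void)
    {
        pcpu_base = calloc(NR_CPUS, UNIT_SIZE);

        /* "Allocate" a counter at offset 128; bump each cpu's copy. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            ++*(long *)pcpu_ptr(128, cpu);

        printf("cpu 2's counter lives at %p\n", pcpu_ptr(128, 2));
        return 0;
    }
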
vmalloc.c
1385 * @addr: base address
1404 * @addr: base address
1475 * @addr: memory base address
1506 * @addr: memory base address
2357 * existing vmap_area, the base address is pulled down to fit the
2370 unsigned long base, start, end, last_end; local
2425 base = vmalloc_end - last_end;
2428 base = pvm_determine_end(&next, &prev, align) - end;
2431 BUG_ON(next && next->va_end <= base + end);
2432 BUG_ON(prev && prev->va_end > base
[all...]
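
The pcpu_get_vm_areas() hits show top-down placement: the candidate base starts at vmalloc_end - last_end and is pulled down whenever a requested subarea would overlap an existing vmap_area, with BUG_ON asserting the final layout. A heavily simplified user-space sketch of that pull-down loop over a sorted busy list; all names are hypothetical:

    #include <stdio.h>

    struct busy { unsigned long start, end; }; /* sorted, disjoint */

    /* Start at the highest base that fits and pull it down below any
     * busy range intersecting [base, base + size); returns 0 if no
     * room above 'lo'. */
    static unsigned long place_top_down(unsigned long lo, unsigned long hi,
                                        unsigned long size,
                                        const struct busy *b, int n)
    {
        unsigned long base = hi - size;

        for (int i = n - 1; i >= 0; i--) {
            if (b[i].start >= base + size)
                continue;              /* busy range is above us */
            if (b[i].end <= base)
                break;                 /* it and the rest are below */
            base = b[i].start - size;  /* pull the base down to fit */
        }
        return base >= lo ? base : 0;
    }

    int main(void)
    {
        struct busy b[] = { { 0x3000, 0x4000 }, { 0x7000, 0x9000 } };

        /* 0x5000: pulled below the busy range at 0x7000. */
        printf("base=%#lx\n",
               place_top_down(0x1000, 0xA000, 0x2000, b, 2));
        return 0;
    }
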
hugetlb.c
1961 * the base kernel, on the hugetlb module.
3402 * Shared mappings base their reservation on the number of pages that
3509 unsigned long base = addr & PUD_MASK; local
3510 unsigned long end = base + PUD_SIZE;
3516 vma->vm_start <= base && end <= vma->vm_end)
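
This check (from the PMD-sharing test) masks addr down to its PUD_SIZE window and proceeds only if the vma spans that whole window. A user-space sketch of the containment test, with a hypothetical 2 MiB window standing in for PUD_SIZE:

    #include <stdio.h>

    /* Does [start, end) cover the whole aligned window around addr?
     * Mirrors the base/end check in mm/hugetlb.c. */
    static int covers_window(unsigned long start, unsigned long end,
                             unsigned long addr, unsigned long win)
    {
        unsigned long base = addr & ~(win - 1);

        return start <= base && base + win <= end;
    }

    int main(void)
    {
        unsigned long win = 1UL << 21; /* hypothetical 2 MiB window */

        printf("%d\n", covers_window(0x000000, 0x400000, 0x234000, win)); /* 1 */
        printf("%d\n", covers_window(0x210000, 0x400000, 0x234000, win)); /* 0 */
        return 0;
    }
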
slub.c
230 void *base; local
235 base = page_address(page);
236 if (object < base || object >= base + page->objects * s->size ||
237 (object - base) % s->size) {
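
The slub hit validates an object pointer against its slab: it must lie inside [base, base + objects * size) and fall exactly on an object boundary. A user-space sketch of the same two tests:

    #include <stddef.h>
    #include <stdio.h>

    /* Is 'object' a legal object address in a slab of 'nobj' objects
     * of 'size' bytes starting at 'base'? Mirrors the mm/slub.c check. */
    static int valid_object(char *base, unsigned int nobj,
                            unsigned int size, char *object)
    {
        if (object < base || object >= base + (size_t)nobj * size)
            return 0;                 /* outside the slab */
        if ((size_t)(object - base) % size)
            return 0;                 /* not on an object boundary */
        return 1;
    }

    int main(void)
    {
        static char slab[8 * 64]; /* 8 objects of 64 bytes */

        printf("%d\n", valid_object(slab, 8, 64, slab + 128)); /* 1 */
        printf("%d\n", valid_object(slab, 8, 64, slab + 100)); /* 0 */
        return 0;
    }
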

Completed in 128 milliseconds