Search results for "map" under /mm/:
sparse-vmemmap.c
    11:    * architectures already map their physical space using 1-1 mappings
    12:    * via TLBs. For those arches the virtual memory map is essentially
    33:    * Allocate a block of memory to be used to back the virtual memory map
    179:  struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);   (local)
    180:  int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
    184:  return map;
    215:  printk(KERN_ERR "%s: sparsemem memory map backing failed " …
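
The vmemmap approach keeps all struct page entries in one virtually contiguous array, so pfn_to_page() reduces to pointer arithmetic once the backing memory is populated. Below is a minimal userspace sketch of that idea; vmemmap_base and my_pfn_to_page are illustrative stand-ins, not the kernel's symbols.

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; };

#define PAGES_PER_SECTION 4096UL

static struct page *vmemmap_base;       /* stands in for the kernel's vmemmap */

/* with a contiguous map, pfn_to_page() is plain pointer arithmetic */
static struct page *my_pfn_to_page(unsigned long pfn)
{
        return vmemmap_base + pfn;
}

int main(void)
{
        unsigned long pnum = 3;         /* pretend section number */

        /* "populate" backing memory covering sections 0..pnum */
        vmemmap_base = calloc((pnum + 1) * PAGES_PER_SECTION,
                              sizeof(struct page));
        if (!vmemmap_base)
                return 1;

        struct page *map = my_pfn_to_page(pnum * PAGES_PER_SECTION);
        printf("section %lu map starts at %p\n", pnum, (void *)map);
        free(vmemmap_base);
        return 0;
}
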
page_cgroup.c
    324:  struct page **map;   (member in struct swap_cgroup_ctrl)
    365:  ctrl->map[idx] = page;
    371:  __free_page(ctrl->map[idx]);
    387:  mappage = ctrl->map[offset / SC_PER_PAGE];
    476:  ctrl->map = array;
    480:  ctrl->map = NULL;
    498:  struct page **map;   (local)
    507:  map = ctrl->map;
    509:  ctrl->map …
    … (further matches elided)
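
The swap_cgroup control structure above is a two-level lookup: the top-level map holds one page pointer per backing page, and an entry's offset splits into a page index (offset / SC_PER_PAGE) and an index within that page. A hedged userspace model follows, with heap arrays standing in for pages and an assumed 16-bit entry type; all names are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define SC_PER_PAGE 2048        /* entries per backing page (illustrative) */

struct ctrl {
        unsigned short **map;   /* top level: one pointer per backing page */
        unsigned long length;   /* total number of entries */
};

static unsigned short *lookup(struct ctrl *ctrl, unsigned long offset)
{
        /* split the offset: which page, then which slot inside it */
        unsigned short *mappage = ctrl->map[offset / SC_PER_PAGE];
        return &mappage[offset % SC_PER_PAGE];
}

int main(void)
{
        struct ctrl ctrl;
        unsigned long pages = 4;

        ctrl.length = pages * SC_PER_PAGE;
        ctrl.map = calloc(pages, sizeof(*ctrl.map));
        for (unsigned long i = 0; i < pages; i++)
                ctrl.map[i] = calloc(SC_PER_PAGE, sizeof(unsigned short));

        *lookup(&ctrl, 5000) = 42;
        printf("entry 5000 = %u\n", *lookup(&ctrl, 5000));

        for (unsigned long i = 0; i < pages; i++)
                free(ctrl.map[i]);
        free(ctrl.map);
        return 0;
}
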
bootmem.c
    113:  bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
    187:  unsigned long *map, idx, vec;   (local)
    189:  map = bdata->node_bootmem_map;
    191:  vec = ~map[idx / BITS_PER_LONG];
    761:   * @section_nr: sparse map section to allocate from
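
bootmem records allocated pages as set bits, so the scan at line 191 inverts each bitmap word (vec = ~map[...]) and then looks for set bits, which now mean free pages; an all-zero vec lets a fully allocated word be skipped at once. A small userspace rendering of that scan, with illustrative sizes:

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define NBITS 256

static unsigned long map[NBITS / BITS_PER_LONG];

static long find_free(unsigned long start)
{
        for (unsigned long idx = start; idx < NBITS; idx += BITS_PER_LONG) {
                unsigned long vec = ~map[idx / BITS_PER_LONG];
                if (!vec)
                        continue;               /* whole word is allocated */
                for (unsigned long bit = 0; bit < BITS_PER_LONG; bit++)
                        if (vec & (1UL << bit))
                                return idx + bit;
        }
        return -1;
}

int main(void)
{
        memset(map, 0xff, sizeof(map));         /* everything allocated... */
        map[1] &= ~(1UL << 5);                  /* ...except one page */
        printf("first free page: %ld\n", find_free(0));
        return 0;
}
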
sparse.c
    384:  struct page *map;   (local)
    387:  map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
    388:  if (map)
    389:  return map;
    392:  map = __alloc_bootmem_node_high(NODE_DATA(nid), size, …
    394:  return map;
    401:  void *map;   (local)
    405:  map = alloc_remap(nodeid, size * map_count);
    406:  if (map) {
    410:  map_map[pnum] = map;
    458:  struct page *map;   (local)
    484:  struct page *map;   (local)
    606:  sparse_init_one_section(__nr_to_section(pnum), pnum, map, …   (local)
    … (further matches elided)
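
Both sparse.c allocation paths shown above follow the same fallback shape: try the architecture's remap area first via alloc_remap(), and only fall back to the boot-time allocator when no remapped space exists. A sketch of the pattern with stand-in allocators; fake_alloc_remap is hypothetical, and calloc() plays the bootmem fallback.

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; };
#define PAGES_PER_SECTION 4096UL

/* stand-in: most architectures have no remap area and return NULL */
static void *fake_alloc_remap(int nid, unsigned long size)
{
        (void)nid; (void)size;
        return NULL;
}

static struct page *section_map_alloc(int nid)
{
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
        struct page *map = fake_alloc_remap(nid, size);

        if (map)
                return map;             /* fast path: preallocated remap area */
        return calloc(1, size);         /* fallback: the "bootmem" allocator */
}

int main(void)
{
        struct page *map = section_map_alloc(0);
        printf("section map at %p\n", (void *)map);
        free(map);
        return 0;
}
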
percpu.c
    39:    * on chunk->map. A positive value in the map represents a free
    41:    * by scanning this map sequentially and serving the first matching
    78:   #define PCPU_DFL_MAP_ALLOC 16   /* start a map with 16 ents */
    105:  int map_used;    /* # of map entries used */
    106:  int map_alloc;   /* # of map entries allocated */
    107:  int *map;        /* allocation map */   (member in struct pcpu_chunk)
    342:   * pcpu_need_to_extend - determine whether chunk area map needs to be extended
    345:   * Determine whether area map o…
    1917: int *map;   (local)
    … (further matches elided)
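
The percpu chunk map encodes each area's size in an int, positive for free and negative for allocated, and allocation is a sequential first-fit scan that splits a free entry when it is larger than needed. A simplified userspace model of that scan (no alignment handling, fixed-size map, names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define MAP_ALLOC 16            /* mirrors PCPU_DFL_MAP_ALLOC */

struct chunk {
        int map_used;
        int map[MAP_ALLOC];     /* |value| = area size, sign = free/allocated */
};

/* first-fit allocation; returns offset within the chunk, or -1 */
static int chunk_alloc(struct chunk *c, int size)
{
        int off = 0;

        for (int i = 0; i < c->map_used; off += abs(c->map[i]), i++) {
                if (c->map[i] < size)           /* allocated, or too small */
                        continue;
                if (c->map[i] > size && c->map_used < MAP_ALLOC) {
                        /* split: shift the tail right, keep the remainder free */
                        for (int j = c->map_used; j > i; j--)
                                c->map[j] = c->map[j - 1];
                        c->map[i + 1] = c->map[i] - size;
                        c->map_used++;
                }
                c->map[i] = -size;              /* negative marks it allocated */
                return off;
        }
        return -1;
}

int main(void)
{
        struct chunk c = { .map_used = 1, .map = { 1024 } };

        printf("a at %d\n", chunk_alloc(&c, 128));   /* 0 */
        printf("b at %d\n", chunk_alloc(&c, 256));   /* 128 */
        return 0;
}
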
swapfile.c
    2085: /* OK, set up the swap map and apply the bad block list */
    2183:  * Verify that a swap entry is valid and increment its swap map count.
    2315: if (end > si->max)   /* don't go beyond end of map */
    2421: unsigned char *map;   (local)
    2424:  * If the previous map said no continuation, but we've found
    2430: map = kmap_atomic(list_page, KM_USER0) + offset;
    2431: count = *map;
    2432: kunmap_atomic(map, KM_USER0);
    2465: unsigned char *map;   (local)
    2475: map …
    … (further matches elided)
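
The continuation logic around lines 2421-2475 handles swap reference counts that no longer fit in a single swap-map byte by chaining continuation pages, each touched briefly via kmap_atomic(). The toy model below keeps the spill-over idea but deliberately simplifies the encoding to one continuation byte; it does not reproduce the kernel's actual flag bits.

#include <stdio.h>

#define COUNT_MAX 0x7f                  /* what the primary map byte can hold */

struct entry {
        unsigned char map;              /* low-order count, 0..COUNT_MAX */
        unsigned char cont;             /* counts units of COUNT_MAX + 1 */
};

static void ref_get(struct entry *e)
{
        if (e->map < COUNT_MAX)
                e->map++;
        else {                          /* overflow: spill into continuation */
                e->cont++;
                e->map = 0;
        }
}

static unsigned int total(const struct entry *e)
{
        return e->cont * (COUNT_MAX + 1u) + e->map;
}

int main(void)
{
        struct entry e = { 0, 0 };

        for (int i = 0; i < 200; i++)
                ref_get(&e);
        printf("map=%u cont=%u total=%u\n", e.map, e.cont, total(&e));
        return 0;
}
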
vmalloc.c
    1083:  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
    1200:  * map_kernel_range_noflush - map kernel VM area with the specified pages
    1201:  * @addr: start of the VM area to map
    1202:  * @size: size of the VM area to map
    1204:  * @pages: pages to map
    1535:  * vmap - map an array of pages into virtually contiguous space
    1537:  * @count: number of pages to map
    1725:  * allocator and map them into contiguous kernel virtual space.
    1740:  * allocator and map them into contiguous kernel virtual space.
    1782:  * allocator and map the …
    1909: void *map = kmap_atomic(p, KM_USER0);   (local)
    1948: void *map = kmap_atomic(p, KM_USER0);   (local)
    … (further matches elided)
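
vmap() (line 1535) takes an array of physically scattered pages and returns one virtually contiguous kernel mapping. A kernel-style usage sketch, untested and with cleanup on the error path elided, to show the shape of the call:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* map four individually allocated pages into one contiguous virtual range */
static void *map_four_pages(struct page *pages[4])
{
        int i;

        for (i = 0; i < 4; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        return NULL;    /* freeing earlier pages elided */
        }
        /* virtually contiguous view of physically scattered pages;
         * undo later with vunmap() plus __free_page() on each page */
        return vmap(pages, 4, VM_MAP, PAGE_KERNEL);
}
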
hugetlb.c
    264:   * bits of the reservation map pointer, which are always clear due to
    282:   * manner to a shared mapping. A shared mapping has a region map associated
    283:   * with the underlying file, this region map represents the backing file
    285:   * after the page is instantiated. A private mapping has a region map
    287:   * reference it, this region map represents those offsets which have consumed
    322:  /* Clear out any active regions before we release the map. */
    336:  static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)   (argument)
    342:  HPAGE_RESV_MASK) | (unsigned long)map);
    1014: /* Mark this page used in the map. */
    2060:  * This new VMA should share its siblings reservation map i…
    … (further matches elided)
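
set_vma_resv_map() at line 336 relies on the reservation map pointer being aligned, so its low bits are always zero and can carry flag bits (masked with HPAGE_RESV_MASK, as line 342 shows). A userspace model of that pointer-tagging trick; the mask value and names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define RESV_MASK 0x3UL                 /* low bits reserved for flags */

struct resv_map { long adds_in_progress; };

static uintptr_t slot;                  /* plays vma->vm_private_data */

static void set_resv_map(struct resv_map *map)
{
        /* keep the existing flag bits, replace only the pointer bits */
        slot = (slot & RESV_MASK) | (uintptr_t)map;
}

static struct resv_map *get_resv_map(void)
{
        return (struct resv_map *)(slot & ~RESV_MASK);
}

int main(void)
{
        struct resv_map *map = malloc(sizeof(*map));  /* malloc aligns enough */

        slot |= 0x1;                    /* pretend a flag was already set */
        set_resv_map(map);
        printf("flags=%lx map ok=%d\n",
               (unsigned long)(slot & RESV_MASK), get_resv_map() == map);
        free(map);
        return 0;
}
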
page_alloc.c
    506:   * The bottom level table contains the map for the smallest allocatable
    3850: "Initialising map node %d zone %lu pfns %lu -> %lu\n",
    4347: struct page *map;   (local)
    4358: map = alloc_remap(pgdat->node_id, size);
    4359: if (!map)
    4360: map = alloc_bootmem_node_nopanic(pgdat, size);
    4361: pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
    4421:  * This function should be called after node map is populated and sorted.
    4432:  * populated node map.
    4580:  * As the map i…
    … (further matches elided)
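
Line 4361 biases the freshly allocated node_mem_map: the backing array covers a pfn range rounded down to an alignment boundary, and the stored base is offset so that indexing relative to node_start_pfn lands on the right struct page. A userspace model of that fix-up, with made-up constants:

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; };

int main(void)
{
        unsigned long node_start_pfn = 1000, node_pages = 50;
        unsigned long start = node_start_pfn & ~63UL;   /* rounded-down base */
        unsigned long end = node_start_pfn + node_pages;
        struct page *map, *node_mem_map, *pg;

        /* back the whole rounded range with struct pages */
        map = calloc(end - start, sizeof(struct page));
        if (!map)
                return 1;

        /* bias the stored base so pfn-relative indexing just works */
        node_mem_map = map + (node_start_pfn - start);

        pg = node_mem_map + (1010 - node_start_pfn);    /* page for pfn 1010 */
        printf("pfn 1010 -> slot %ld of the allocation\n", (long)(pg - map));
        free(map);
        return 0;
}
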
slub.c
    441:   * Determine a map of object in use on a page.
    446:  static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)   (argument)
    452:  set_bit(slab_index(p, s, addr), map);   (local)
    3095: unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * …   (local)
    3097: if (!map)
    3102: get_map(s, page, map);
    3105: if (!test_bit(slab_index(p, s, addr), map)) {
    3112: kfree(map);
    4057: validate_slab(struct kmem_cache *s, struct page *page, unsigned long *map)   (argument)
    4058: unsigned long *map)
    4068: bitmap_zero(map, pag…
    4084: validate_slab_slab(struct kmem_cache *s, struct page *page, unsigned long *map)   (argument)
    4092: validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n, unsigned long *map)   (argument)
    4130: unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * …   (local)
    4271: process_slab(struct loc_track *t, struct kmem_cache *s, struct page *page, enum track_item alloc, unsigned long *map)   (argument)
    4293: unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * …   (local)
    … (further matches elided)
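
get_map() (line 446) walks a slab page's freelist and sets a bit for every free object, so a clear bit afterwards means the object is in use, which is exactly the test at line 3105. A userspace rendering with slab geometry faked by plain arrays:

#include <stdio.h>

#define OBJECTS 16

int main(void)
{
        unsigned long map = 0;              /* one bit per object */
        int freelist[] = { 3, 7, 12, -1 };  /* fake freelist, -1 terminates */

        /* get_map() step: mark every free object in the bitmap */
        for (int i = 0; freelist[i] >= 0; i++)
                map |= 1UL << freelist[i];

        /* clear bits now identify objects in use */
        for (int idx = 0; idx < OBJECTS; idx++)
                if (!(map & (1UL << idx)))
                        printf("object %d is in use\n", idx);
        return 0;
}
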