/arch/s390/mm/
  page-states.c
     95  struct zone *zone;                                        (local)
    101  for_each_populated_zone(zone) {
    102          spin_lock_irqsave(&zone->lock, flags);
    104          list_for_each(l, &zone->free_area[order].free_list[t]) {
    112          spin_unlock_irqrestore(&zone->lock, flags);
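The page-states.c hit shows the canonical way to walk the buddy allocator's free lists: take zone->lock with interrupts disabled, then traverse free_area[order].free_list[migratetype]. A minimal sketch of that pattern, assuming the kernel vintage of these hits (per-migratetype free lists, free pages linked through page->lru); the helper name walk_free_pages and its callback are illustrative, not from the source:

    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/spinlock.h>

    /* Illustrative helper: visit every page sitting on a buddy free list. */
    static void walk_free_pages(void (*fn)(struct page *page, unsigned int order))
    {
            struct zone *zone;
            struct list_head *l;
            unsigned long flags;
            unsigned int order, t;

            for_each_populated_zone(zone) {
                    /* The free lists may only be walked under zone->lock. */
                    spin_lock_irqsave(&zone->lock, flags);
                    for_each_migratetype_order(order, t) {
                            list_for_each(l, &zone->free_area[order].free_list[t])
                                    fn(list_entry(l, struct page, lru), order);
                    }
                    spin_unlock_irqrestore(&zone->lock, flags);
            }
    }
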
  init.c
    229  struct zone *zone;                                        (local)
    235  for_each_zone(zone) {
    236          if (zone_idx(zone) != ZONE_MOVABLE) {
    237                  /* Add range within existing zone limits */
    238                  zone_start_pfn = zone->zone_start_pfn;
    239                  zone_end_pfn = zone->zone_start_pfn +
    240                                 zone->spanned_pages;
    250  rc = __add_pages(nid, zone, start_pfn, nr_pages);
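Several of the arch_add_memory() hits in this listing (s390, x86, powerpc, ia64) reduce to the same two steps: pick a destination zone out of the node's node_zones[] array and hand the new range to __add_pages(). A condensed sketch under the old __add_pages() signature shown in these hits (recent kernels dropped the explicit zone argument):

    #include <linux/memory_hotplug.h>
    #include <linux/mmzone.h>

    /* Condensed arch_add_memory(): register a new physical range with the
     * core, targeting the node's NORMAL zone (old __add_pages() signature). */
    int arch_add_memory(int nid, u64 start, u64 size)
    {
            unsigned long start_pfn = start >> PAGE_SHIFT;
            unsigned long nr_pages = size >> PAGE_SHIFT;
            struct zone *zone = NODE_DATA(nid)->node_zones + ZONE_NORMAL;

            return __add_pages(nid, zone, start_pfn, nr_pages);
    }
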
/arch/x86/mm/
  highmem_32.c
    121  struct zone *zone;                                        (local)
    124  for_each_zone(zone) {
    127          if (!is_highmem(zone))
    130          zone_start_pfn = zone->zone_start_pfn;
    131          zone_end_pfn = zone_start_pfn + zone->spanned_pages;
    133          nid = zone_to_nid(zone);
    135          zone->name, nid, zone_start_pfn, zone_end_pfn);
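The highmem_32.c hit derives a zone's PFN range from zone_start_pfn and spanned_pages before releasing its pages to the allocator. The range computation spelled out as a standalone helper (the printk format is paraphrased from the fragment at line 135, not copied):

    #include <linux/mmzone.h>
    #include <linux/kernel.h>

    /* Log the PFN span of every highmem zone. */
    static void report_highmem_zones(void)
    {
            struct zone *zone;

            for_each_zone(zone) {
                    unsigned long zone_start_pfn, zone_end_pfn;

                    if (!is_highmem(zone))
                            continue;

                    zone_start_pfn = zone->zone_start_pfn;
                    zone_end_pfn = zone_start_pfn + zone->spanned_pages;

                    printk(KERN_INFO "%s: node %d, pfns %lu-%lu\n",
                           zone->name, zone_to_nid(zone),
                           zone_start_pfn, zone_end_pfn);
            }
    }
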
  init_32.c
    839  struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;    (local)
    843  return __add_pages(nid, zone, start_pfn, nr_pages);
  init_64.c
    650  * Memory is added always to NORMAL zone. This means you will never get
    656  struct zone *zone = pgdat->node_zones + ZONE_NORMAL;      (local)
    665  ret = __add_pages(nid, zone, start_pfn, nr_pages);
/arch/m68k/mm/
  mcfmmu.c
     46  enum zone_type zone;                                      (local)
     82  for (zone = 0; zone < MAX_NR_ZONES; zone++)
     83          zones_size[zone] = 0x0;
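The mcfmmu.c loop zeroes a zones_size[] array before describing memory to the core; under the bootmem-era interface these hits come from, that array is then handed to free_area_init_node(). A sketch assuming that old interface and a single-node layout (setup_zones, start_pfn, and num_pages are illustrative names, and ZONE_DMA presumes CONFIG_ZONE_DMA=y):

    #include <linux/mm.h>
    #include <linux/mmzone.h>

    static void __init setup_zones(unsigned long start_pfn, unsigned long num_pages)
    {
            unsigned long zones_size[MAX_NR_ZONES];
            enum zone_type zone;

            /* Start with every zone empty... */
            for (zone = 0; zone < MAX_NR_ZONES; zone++)
                    zones_size[zone] = 0;

            /* ...then place all pages in the lowest zone, as small
             * ports typically do. */
            zones_size[ZONE_DMA] = num_pages;

            free_area_init_node(0, zones_size, start_pfn, NULL);
    }
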
/arch/powerpc/platforms/pseries/
  hotplug-memory.c
     79  struct zone *zone;                                        (local)
     89  zone = page_zone(pfn_to_page(start_pfn));
    100  ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
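hotplug-memory.c shows the removal side of hotplug: recover the owning zone from the block's first page with page_zone(), then pass the whole block to __remove_pages(). Condensed into a self-contained helper, keeping the old three-argument __remove_pages() used here (the function name is illustrative):

    #include <linux/memory_hotplug.h>
    #include <linux/mm.h>

    /* Condensed pseries-style removal of one memory block. */
    static int remove_memory_block(u64 start, u64 memblock_size)
    {
            unsigned long start_pfn = start >> PAGE_SHIFT;
            struct zone *zone = page_zone(pfn_to_page(start_pfn));

            return __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
    }
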
/arch/tile/include/asm/
  homecache.h
     26  struct zone;
/arch/tile/mm/
  pgtable.c
     45  struct zone *zone;                                        (local)
     66  for_each_zone(zone) {
     69          if (!populated_zone(zone))
     72          spin_lock_irqsave(&zone->lock, flags);
     74          int nr = zone->free_area[order].nr_free;
     79          spin_unlock_irqrestore(&zone->lock, flags);
     81          zone_to_nid(zone), zone->name,
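The tile pgtable.c hit sums free memory per zone by reading free_area[order].nr_free under zone->lock; an order-n entry represents 2^n contiguous pages, hence the shift. Reconstructed as a self-contained helper (the pr_info format is paraphrased from the fragment at line 81):

    #include <linux/mmzone.h>
    #include <linux/spinlock.h>
    #include <linux/kernel.h>

    static void show_free_by_order(void)
    {
            struct zone *zone;
            unsigned long flags;
            unsigned int order;

            for_each_zone(zone) {
                    unsigned long free = 0;

                    if (!populated_zone(zone))
                            continue;

                    spin_lock_irqsave(&zone->lock, flags);
                    for (order = 0; order < MAX_ORDER; order++) {
                            int nr = zone->free_area[order].nr_free;

                            /* An order-n block holds 2^n pages. */
                            free += (unsigned long)nr << order;
                    }
                    spin_unlock_irqrestore(&zone->lock, flags);

                    pr_info("node %d zone %s: %lu pages free\n",
                            zone_to_nid(zone), zone->name, free);
            }
    }
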
  init.c
    740  struct zone *z;
    936  struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;  (local)
    940  return __add_pages(zone, start_pfn, nr_pages);
/arch/ia64/mm/
  init.c
    466  unsigned long zone;              (member in struct memmap_init_callback_data)
    495  args->nid, args->zone, page_to_pfn(map_start),
    501  memmap_init (unsigned long size, int nid, unsigned long zone,   (argument)
    505  memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
    514  args.zone = zone;
    675  struct zone *zone;                                        (local)
    682  zone = pgdat->node_zones + ZONE_NORMAL;
    683  ret = __add_pages(nid, zone, start_pfn, nr_pages);
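The ia64 hits show why memmap_init() takes the zone as a plain unsigned long: the arguments get bundled into a memmap_init_callback_data so an EFI memory-map walk can initialize the memmap piecewise when a virtual memmap is in use. A sketch of that bundling, reconstructed from the fragments above and hedged accordingly (vmem_map, efi_memmap_walk, and virtual_memmap_init are the ia64 internals; the bodies are abbreviated):

    struct memmap_init_callback_data {
            struct page *start;
            struct page *end;
            int nid;
            unsigned long zone;
    };

    void __meminit
    memmap_init(unsigned long size, int nid, unsigned long zone,
                unsigned long start_pfn)
    {
            if (!vmem_map) {
                    /* Flat memmap: initialize the whole span at once. */
                    memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
            } else {
                    /* Virtual memmap: bundle the arguments, then initialize
                     * only ranges the EFI memory map says actually exist. */
                    struct memmap_init_callback_data args;

                    args.start = pfn_to_page(start_pfn);
                    args.end = args.start + size;
                    args.nid = nid;
                    args.zone = zone;
                    efi_memmap_walk(virtual_memmap_init, &args);
            }
    }
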
/arch/powerpc/mm/
  mem.c
    121  struct zone *zone;                                        (local)
    132  zone = pgdata->node_zones;
    134  return __add_pages(nid, zone, start_pfn, nr_pages);
/arch/parisc/mm/
  init.c
    740  struct zone *zone;                                        (local)
    742  printk("Zone list for zone %d on node %d: ", j, i);
    743  for_each_zone_zonelist(zone, z, zl, j)
    744          printk("[%d/%s] ", zone_to_nid(zone),
    745                 zone->name);
    901  the zone */
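The parisc fragment iterates a node's zonelist in allocation-fallback order with for_each_zone_zonelist(), printing node/zone pairs. Expanded into a standalone dump; note that node_zonelist() and the GFP_KERNEL zonelist choice are assumptions here, since the hit does not show how zl was obtained:

    #include <linux/mmzone.h>
    #include <linux/nodemask.h>
    #include <linux/gfp.h>
    #include <linux/kernel.h>

    static void show_zonelists(void)
    {
            int nid, j;

            for_each_online_node(nid) {
                    struct zonelist *zl = node_zonelist(nid, GFP_KERNEL);
                    struct zoneref *z;
                    struct zone *zone;

                    for (j = MAX_NR_ZONES - 1; j >= 0; j--) {
                            printk("Zone list for zone %d on node %d: ", j, nid);
                            for_each_zone_zonelist(zone, z, zl, j)
                                    printk("[%d/%s] ", zone_to_nid(zone),
                                           zone->name);
                            printk("\n");
                    }
            }
    }
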
/arch/c6x/
  Kconfig
    107  int "Maximum zone order"
    111  blocks into "zones", where each zone is a power of two number of
/arch/x86/
  Makefile
     63  KBUILD_CFLAGS += -mno-red-zone
/arch/ia64/include/asm/
  pgtable.h
    565  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
/arch/sh/kernel/cpu/sh5/
  entry.S
    684  bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone