Searched refs:zone (Results 1 - 22 of 22) sorted by relevance

/mm/
mmzone.c
30 struct zone *next_zone(struct zone *zone) argument
32 pg_data_t *pgdat = zone->zone_pgdat;
34 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
35 zone++;
39 zone = pgdat->node_zones;
41 zone = NULL;
43 return zone;
55 /* Returns the next zone at or below highest_zoneidx in a zonelist */
56 next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, nodemask_t *nodes, struct zone **zone) argument
78 memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone) argument
[all...]
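Note: the next_zone() body quoted above is the core of the kernel's zone iterator. Below is a minimal user-space model of the same two-level walk; every type and name in it is an illustrative stand-in, not the kernel's (nodes are chained with a toy next pointer rather than next_online_pgdat()).

/*
 * Toy model of next_zone(): walk one node's fixed zone array,
 * then hop to the first zone of the next node, and return NULL
 * once every node is exhausted.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_NR_ZONES 4
#define NR_NODES     2

struct node;
struct zone { int id; struct node *node; };
struct node { struct zone zones[MAX_NR_ZONES]; struct node *next; };

static struct zone *next_zone(struct zone *zone)
{
	struct node *node = zone->node;

	if (zone < node->zones + MAX_NR_ZONES - 1)
		return zone + 1;          /* next zone on the same node */
	if (node->next)
		return node->next->zones; /* first zone of the next node */
	return NULL;                      /* ran off the last node */
}

int main(void)
{
	static struct node nodes[NR_NODES];

	for (int n = 0; n < NR_NODES; n++) {
		nodes[n].next = n + 1 < NR_NODES ? &nodes[n + 1] : NULL;
		for (int z = 0; z < MAX_NR_ZONES; z++)
			nodes[n].zones[z] = (struct zone){ n * MAX_NR_ZONES + z, &nodes[n] };
	}
	for (struct zone *zone = nodes[0].zones; zone; zone = next_zone(zone))
		printf("zone %d\n", zone->id);   /* prints 0..7 in order */
	return 0;
}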
compaction.c
25 * at the end of a zone and migrate_pfn begins at the start. Movable pages
26 * are moved to the end of a zone during a compaction run and the run
44 struct zone *zone; member in struct:compact_control
61 /* Isolate free pages onto a private freelist. Must hold zone->lock */
62 static unsigned long isolate_freepages_block(struct zone *zone, argument
71 zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
138 static void isolate_freepages(struct zone *zone, struct compact_control *cc) argument
223 acct_isolated(struct zone *zone, struct compact_control *cc) argument
240 too_many_isolated(struct zone *zone) argument
265 isolate_migratepages(struct zone *zone, struct compact_control *cc) argument
454 compact_finished(struct zone *zone, struct compact_control *cc) argument
502 compaction_suitable(struct zone *zone, int order) argument
545 compact_zone(struct zone *zone, struct compact_control *cc) argument
611 compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, bool sync) argument
649 struct zone *zone; local
684 struct zone *zone; local
[all...]
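Note: the struct compact_control comment above (lines 25-26) describes compaction's two converging scanners. The sketch below models that converging walk over a toy pageblock array; it is a hand-rolled illustration of the idea, not the kernel's migration machinery.

/*
 * Toy model of the converging scanners: migrate_pfn sweeps up
 * from the zone start looking for movable pages ('M'), free_pfn
 * sweeps down from the end looking for free pages ('F'), and
 * each movable page found is "migrated" into the free slot.
 * The run finishes when the two scanners meet.
 */
#include <stdio.h>
#include <string.h>

static void compact(char *zone, size_t len)
{
	size_t migrate_pfn = 0;
	size_t free_pfn = len - 1;

	while (migrate_pfn < free_pfn) {
		while (migrate_pfn < free_pfn && zone[migrate_pfn] != 'M')
			migrate_pfn++;
		while (migrate_pfn < free_pfn && zone[free_pfn] != 'F')
			free_pfn--;
		if (migrate_pfn < free_pfn) {
			zone[free_pfn] = 'M';     /* page lands near the end */
			zone[migrate_pfn] = 'F';  /* its old slot is now free */
		}
	}
}

int main(void)
{
	char zone[] = "M.FM.F.MF";   /* '.' = unmovable, stays put */

	compact(zone, strlen(zone));
	printf("%s\n", zone);        /* free pages collect at the start */
	return 0;
}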
vmstat.c
77 * Manage combined zone based / global counters
86 int calculate_pressure_threshold(struct zone *zone) argument
99 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
110 int calculate_normal_threshold(struct zone *zone) argument
117 * of memory per zone. More memory means that we can defer updates for
145 mem = zone->present_pages >> (27 - PAGE_SHIFT);
158 * Refresh the thresholds for each zone
162 struct zone *zone; local
191 struct zone *zone; local
211 __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) argument
254 __inc_zone_state(struct zone *zone, enum zone_stat_item item) argument
276 __dec_zone_state(struct zone *zone, enum zone_stat_item item) argument
311 mod_state(struct zone *zone, enum zone_stat_item item, int delta, int overstep_mode) argument
349 mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) argument
356 inc_zone_state(struct zone *zone, enum zone_stat_item item) argument
376 mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) argument
387 inc_zone_state(struct zone *zone, enum zone_stat_item item) argument
399 struct zone *zone; local
439 struct zone *zone; local
543 fill_contig_page_info(struct zone *zone, unsigned int suitable_order, struct contig_page_info *info) argument
598 fragmentation_index(struct zone *zone, unsigned int order) argument
647 struct zone *zone; local
795 frag_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
816 pagetypeinfo_showfree_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
858 pagetypeinfo_showblockcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
966 zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
1246 unusable_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
1306 extfrag_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
[all...]
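Note: calculate_normal_threshold() above sizes a per-CPU delta threshold from zone memory (present_pages >> (27 - PAGE_SHIFT) counts 128 MiB units with 4 KiB pages), and __mod_zone_page_state() folds a CPU's delta into the shared counter only once it crosses that threshold. Below is a single-threaded sketch of the batching idea, with toy names and a plain array standing in for real per-CPU data.

/*
 * Threshold-batched counter: each CPU accumulates a small signed
 * delta locally and folds it into the shared counter only when
 * |delta| exceeds the threshold, so the shared cacheline is
 * touched rarely at the cost of bounded inaccuracy.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct zone_counter {
	long global;            /* shared, expensive to update */
	int threshold;
	int cpu_delta[NR_CPUS]; /* stand-in for per-CPU diffs */
};

static void mod_zone_state(struct zone_counter *zc, int cpu, int delta)
{
	int d = zc->cpu_delta[cpu] + delta;

	if (abs(d) > zc->threshold) {
		zc->global += d;    /* fold the batch into the global count */
		d = 0;
	}
	zc->cpu_delta[cpu] = d;
}

int main(void)
{
	struct zone_counter zc = { .threshold = 8 };

	for (int i = 0; i < 100; i++)
		mod_zone_state(&zc, i % NR_CPUS, 1);
	/* global lags the true count by at most threshold per CPU */
	printf("global=%ld, true=100\n", zc.global);
	return 0;
}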
vmscan.c
66 * order-0 pages and then compact the zone
163 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, argument
167 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
169 return &zone->reclaim_stat;
172 static unsigned long zone_nr_lru_pages(struct zone *zone, argument
176 return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
178 return zone_page_state(zone, NR_LRU_BASE + lru);
599 * Put unevictable pages directly on zone's unevictable list.
720 shrink_page_list(struct list_head *page_list, struct zone *zone, struct scan_control *sc) argument
1234 struct zone *zone = page_zone(page); local
1253 too_many_isolated(struct zone *zone, int file, struct scan_control *sc) argument
1279 putback_lru_pages(struct zone *zone, struct scan_control *sc, unsigned long nr_anon, unsigned long nr_file, struct list_head *page_list) argument
1325 update_isolated_counts(struct zone *zone, struct scan_control *sc, unsigned long *nr_anon, unsigned long *nr_file, struct list_head *isolated_list) argument
1402 shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, struct scan_control *sc, int priority, int file) argument
1500 move_active_pages_to_lru(struct zone *zone, struct list_head *list, enum lru_list lru) argument
1533 shrink_active_list(unsigned long nr_pages, struct zone *zone, struct scan_control *sc, int priority, int file) argument
1627 inactive_anon_is_low_global(struct zone *zone) argument
1648 inactive_anon_is_low(struct zone *zone, struct scan_control *sc) argument
1666 inactive_anon_is_low(struct zone *zone, struct scan_control *sc) argument
1673 inactive_file_is_low_global(struct zone *zone) argument
1698 inactive_file_is_low(struct zone *zone, struct scan_control *sc) argument
1709 inactive_list_is_low(struct zone *zone, struct scan_control *sc, int file) argument
1718 shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct zone *zone, struct scan_control *sc, int priority) argument
1740 get_scan_count(struct zone *zone, struct scan_control *sc, unsigned long *nr, int priority) argument
1875 should_continue_reclaim(struct zone *zone, unsigned long nr_reclaimed, unsigned long nr_scanned, struct scan_control *sc) argument
1934 shrink_zone(int priority, struct zone *zone, struct scan_control *sc) argument
2008 struct zone *zone; local
2044 zone_reclaimable(struct zone *zone) argument
2054 struct zone *zone; local
2093 struct zone *zone; local
2210 mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, unsigned int swappiness, struct zone *zone, unsigned long *nr_scanned) argument
2337 struct zone *zone = pgdat->node_zones + i; local
2442 struct zone *zone = pgdat->node_zones + i; local
2468 struct zone *zone = pgdat->node_zones + i; local
2483 struct zone *zone = pgdat->node_zones + i; local
2635 struct zone *zone = pgdat->node_zones + i; local
2806 wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) argument
2850 zone_reclaimable_pages(struct zone *zone) argument
3011 zone_unmapped_file_pages(struct zone *zone) argument
3026 zone_pagecache_reclaimable(struct zone *zone) argument
3056 __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) argument
3142 zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) argument
3230 check_move_unevictable_page(struct page *page, struct zone *zone) argument
3268 struct zone *zone; local
3322 scan_zone_unevictable_pages(struct zone *zone) argument
3366 struct zone *zone; local
3410 struct zone *zone; local
[all...]
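Note: the inactive_anon_is_low()/inactive_file_is_low() family above (lines 1627-1709) gates deactivation on the ratio between the active and inactive LRU lists. A one-function sketch of that test follows, with an assumed caller-supplied ratio in place of the kernel's precomputed zone->inactive_ratio.

#include <stdio.h>

struct lru_sizes { unsigned long active, inactive; };

/*
 * Reclaim starts moving pages off the active list when the
 * inactive list shrinks below active / ratio, keeping a pool
 * of aging candidates available for eviction.
 */
static int inactive_is_low(const struct lru_sizes *lru, unsigned long ratio)
{
	return lru->inactive * ratio < lru->active;
}

int main(void)
{
	struct lru_sizes lru = { .active = 900, .inactive = 200 };

	printf("%d\n", inactive_is_low(&lru, 3));   /* 600 < 900 -> 1 */
	return 0;
}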
swap.c
52 struct zone *zone = page_zone(page); local
54 spin_lock_irqsave(&zone->lru_lock, flags);
57 del_page_from_lru(zone, page);
58 spin_unlock_irqrestore(&zone->lru_lock, flags);
210 struct zone *zone = NULL; local
215 struct zone *pagezone = page_zone(page);
217 if (pagezone != zone) {
218 if (zone)
235 struct zone *zone = page_zone(page); local
278 update_page_reclaim_stat(struct zone *zone, struct page *page, int file, int rotated) argument
300 struct zone *zone = page_zone(page); local
346 struct zone *zone = page_zone(page); local
416 struct zone *zone = page_zone(page); local
450 struct zone *zone = page_zone(page); local
589 struct zone *zone = NULL; local
659 lru_add_page_tail(struct zone* zone, struct page *page, struct page *page_tail) argument
698 struct zone *zone = page_zone(page); local
[all...]
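Note: the loop quoted above (lines 210-218) is the batched-release pattern: consecutive pages from the same zone are processed under one hold of that zone's lru_lock, and the lock is swapped only when the zone changes. Here is a user-space sketch with a pthread mutex standing in for zone->lru_lock and toy page/zone types.

#include <pthread.h>
#include <stddef.h>

struct zone { pthread_mutex_t lru_lock; };
struct page { struct zone *zone; };

/*
 * Release a batch of pages, taking each zone's LRU lock once per
 * run of same-zone pages instead of once per page.
 */
static void release_pages(struct page **pages, size_t nr)
{
	struct zone *zone = NULL;

	for (size_t i = 0; i < nr; i++) {
		struct zone *pagezone = pages[i]->zone;

		if (pagezone != zone) {
			if (zone)
				pthread_mutex_unlock(&zone->lru_lock);
			zone = pagezone;
			pthread_mutex_lock(&zone->lru_lock);
		}
		/* ... per-page work under zone->lru_lock goes here ... */
	}
	if (zone)
		pthread_mutex_unlock(&zone->lru_lock);
}

int main(void)
{
	struct zone z0 = { PTHREAD_MUTEX_INITIALIZER };
	struct zone z1 = { PTHREAD_MUTEX_INITIALIZER };
	struct page p0 = { &z0 }, p1 = { &z0 }, p2 = { &z1 };
	struct page *batch[] = { &p0, &p1, &p2 };

	release_pages(batch, 3);   /* locks z0 once for p0+p1, then z1 */
	return 0;
}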
page_alloc.c
228 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
255 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) argument
262 seq = zone_span_seqbegin(zone);
263 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
265 else if (pfn < zone->zone_start_pfn)
267 } while (zone_span_seqretry(zone, seq));
272 static int page_is_consistent(struct zone *zone, struct page *page) argument
284 bad_range(struct zone *zone, struct page *page) argument
294 bad_range(struct zone *zone, struct page *page) argument
508 __free_one_page(struct page *page, struct zone *zone, unsigned int order, int migratetype) argument
608 free_pcppages_bulk(struct zone *zone, int count, struct per_cpu_pages *pcp) argument
654 free_one_page(struct zone *zone, struct page *page, int order, int migratetype) argument
752 expand(struct zone *zone, struct page *page, int low, int high, struct free_area *area, int migratetype) argument
815 __rmqueue_smallest(struct zone *zone, unsigned int order, int migratetype) argument
857 move_freepages(struct zone *zone, struct page *start_page, struct page *end_page, int migratetype) argument
900 move_freepages_block(struct zone *zone, struct page *page, int migratetype) argument
934 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) argument
1006 __rmqueue(struct zone *zone, unsigned int order, int migratetype) argument
1037 rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, int cold) argument
1079 drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) argument
1105 struct zone *zone; local
1141 mark_free_pages(struct zone *zone) argument
1181 struct zone *zone = page_zone(page); local
1269 struct zone *zone; local
1306 buffered_rmqueue(struct zone *preferred_zone, struct zone *zone, int order, gfp_t gfp_flags, int migratetype) argument
1683 struct zone *zone; local
2053 struct zone *zone; local
2479 struct zone *zone; local
2513 show_node(struct zone *zone) argument
2581 struct zone *zone; local
2717 zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) argument
2731 struct zone *zone; local
3137 struct zone *zone; local
3142 &zone); local
3407 setup_zone_migrate_reserve(struct zone *zone) argument
3481 memmap_init_zone(unsigned long size, int nid, unsigned long zone, unsigned long start_pfn, enum memmap_context context) argument
3539 zone_init_free_lists(struct zone *zone) argument
3553 zone_batchsize(struct zone *zone) argument
3635 setup_zone_pageset(struct zone *zone) argument
3659 struct zone *zone; local
3666 zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) argument
3710 struct zone *zone = data; local
3729 zone_pcp_update(struct zone *zone) argument
3734 zone_pcp_init(struct zone *zone) argument
3749 init_currently_empty_zone(struct zone *zone, unsigned long zone_start_pfn, unsigned long size, enum memmap_context context) argument
4256 setup_usemap(struct pglist_data *pgdat, struct zone *zone, unsigned long zonesize) argument
4266 setup_usemap(struct pglist_data *pgdat, struct zone *zone, unsigned long zonesize) argument
4331 struct zone *zone = pgdat->node_zones + j; local
4851 struct zone *zone = &pgdat->node_zones[zone_type]; local
5050 struct zone *zone = pgdat->node_zones + i; local
5083 struct zone *zone = pgdat->node_zones + j; local
5120 struct zone *zone; local
5192 calculate_zone_inactive_ratio(struct zone *zone) argument
5208 struct zone *zone; local
5275 struct zone *zone; local
5291 struct zone *zone; local
5331 struct zone *zone; local
5456 get_pageblock_bitmap(struct zone *zone, unsigned long pfn) argument
5466 pfn_to_bitidx(struct zone *zone, unsigned long pfn) argument
5487 struct zone *zone; local
5515 struct zone *zone; local
5541 __count_immobile_pages(struct zone *zone, struct page *page, int count) argument
5590 struct zone *zone = page_zone(page); local
5607 struct zone *zone; local
5663 struct zone *zone; local
5683 struct zone *zone; local
5725 struct zone *zone = page_zone(page); local
[all...]
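Note: page_outside_zone_boundaries() above (lines 255-267) reads the zone span inside a zone_span_seqbegin()/zone_span_seqretry() loop so that memory hotplug can resize the span concurrently. Below is a C11-atomics sketch of that seqcount read-retry pattern; it is illustrative only, and the kernel's seqlock additionally issues explicit memory barriers.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct zone_span {
	atomic_uint seq;        /* odd while a writer is mid-update */
	unsigned long start_pfn, spanned_pages;
};

static void resize_span(struct zone_span *z, unsigned long start,
			unsigned long pages)
{
	atomic_fetch_add(&z->seq, 1);   /* enter write side: seq goes odd */
	z->start_pfn = start;
	z->spanned_pages = pages;
	atomic_fetch_add(&z->seq, 1);   /* leave: seq even again */
}

static bool pfn_outside_span(struct zone_span *z, unsigned long pfn)
{
	unsigned int seq;
	bool ret;

	do {
		while ((seq = atomic_load(&z->seq)) & 1)
			;               /* writer active: wait it out */
		ret = pfn < z->start_pfn ||
		      pfn >= z->start_pfn + z->spanned_pages;
	} while (atomic_load(&z->seq) != seq);  /* span moved: retry */

	return ret;
}

int main(void)
{
	struct zone_span z = { 0, 100, 50 };

	printf("%d\n", pfn_outside_span(&z, 10));   /* 10 < 100  -> 1 */
	resize_span(&z, 0, 200);
	printf("%d\n", pfn_outside_span(&z, 10));   /* now inside -> 0 */
	return 0;
}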
memory_hotplug.c
155 struct zone *zone; local
163 zone = &pgdat->node_zones[0];
164 for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
165 if (zone->wait_table) {
166 nr_pages = zone->wait_table_hash_nr_entries
169 page = virt_to_page(zone->wait_table);
186 static void grow_zone_span(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) argument
216 __add_zone(struct zone *zone, unsigned long phys_start_pfn) argument
243 __add_section(int nid, struct zone *zone, unsigned long phys_start_pfn) argument
266 __remove_section(struct zone *zone, struct mem_section *ms) argument
275 __remove_section(struct zone *zone, struct mem_section *ms) argument
301 __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, unsigned long nr_pages) argument
339 __remove_pages(struct zone *zone, unsigned long phys_start_pfn, unsigned long nr_pages) argument
402 struct zone *zone; local
653 struct zone *zone = NULL; local
811 struct zone *zone; local
[all...]
mm_init.c
31 struct zone *zone; local
39 /* Identify the zone and nodelist */
43 zone = &pgdat->node_zones[zoneid];
44 if (!populated_zone(zone))
50 zone->name);
53 for_each_zone_zonelist(zone, z, zonelist, zoneid) {
56 zone->node, zone->name);
58 printk(KERN_CONT "0:%s ", zone->name);
124 mminit_verify_page_links(struct page *page, enum zone_type zone, unsigned long nid, unsigned long pfn) argument
[all...]
oom_kill.c
234 struct zone *zone; local
266 for_each_zone_zonelist_nodemask(zone, z, zonelist,
268 if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
591 * if a parallel OOM killing is already taking place that includes a zone in
597 struct zone *zone; local
601 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
602 if (zone_is_oom_locked(zone)) {
608 for_each_zone_zonelist(zone,
630 struct zone *zone; local
646 struct zone *zone; local
668 struct zone *zone; local
[all...]
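Note: the try-set logic above (lines 597-608) is an all-or-nothing claim: if any zone in the zonelist is already OOM-locked, a parallel OOM kill covers it and the caller backs off. The sketch below shows that check-then-mark shape; in the kernel, zone_scan_lock serializes the two passes, while this toy version is single-threaded.

#include <stdbool.h>
#include <stddef.h>

struct zone { bool oom_locked; };

static bool try_set_zonelist_oom(struct zone **zl, size_t nr)
{
	/* Pass 1: if anyone already claimed a zone, give up. */
	for (size_t i = 0; i < nr; i++)
		if (zl[i]->oom_locked)
			return false;
	/* Pass 2: claim every zone for this OOM kill. */
	for (size_t i = 0; i < nr; i++)
		zl[i]->oom_locked = true;
	return true;
}

int main(void)
{
	struct zone a = { false }, b = { true };
	struct zone *zonelist[] = { &a, &b };

	/* b is already locked, so the claim fails and a stays untouched */
	return try_set_zonelist_oom(zonelist, 2) ? 1 : 0;
}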
page_isolation.c
86 * all pages in [start_pfn...end_pfn) must be in the same zone.
87 * zone->lock must be held before calling this.
119 struct zone *zone; local
136 zone = page_zone(page);
137 spin_lock_irqsave(&zone->lock, flags);
139 spin_unlock_irqrestore(&zone->lock, flags);
memcontrol.c
125 * per-zone information in memory controller.
231 * per zone LRU lists.
496 int node, zone; local
501 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
502 mz = mem_cgroup_zoneinfo(mem, node, zone);
503 mctz = soft_limit_tree_node_zone(node, zone);
991 * It's done under lock_page and expected that zone->lru_lock is never held.
996 struct zone *zone = page_zone(page); local
1023 struct zone *zone = page_zone(page); local
1126 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone, enum lru_list lru) argument
1223 mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) argument
1715 mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, struct zone *zone, gfp_t gfp_mask, unsigned long reclaim_options, unsigned long *total_scanned) argument
3435 struct zone *zone; local
3647 mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, gfp_t gfp_mask, unsigned long *total_scanned) argument
3748 struct zone *zone; local
4838 int zone, tmp = node; local
4972 int tmp, node, zone; local
[all...]
internal.h
106 * zone->lock is already acquired when we use these.
263 enum zone_type zone, unsigned long nid, unsigned long pfn);
278 enum zone_type zone, unsigned long nid, unsigned long pfn)
277 mminit_verify_page_links(struct page *page, enum zone_type zone, unsigned long nid, unsigned long pfn) argument
quicklist.c
31 struct zone *zones = NODE_DATA(node)->node_zones;
sparse.c
730 int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn, argument
734 struct pglist_data *pgdat = zone->zone_pgdat;
778 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) argument
huge_memory.c
99 struct zone *zone; local
110 for_each_populated_zone(zone)
1159 struct zone *zone = page_zone(page); local
1164 spin_lock_irq(&zone->lru_lock);
1239 lru_add_page_tail(zone, page, page_tail);
1245 __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1253 __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
1258 spin_unlock_irq(&zone->lru_lock);
[all...]
backing-dev.c
789 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
790 * @zone: A zone to check if it is heavily congested
795 * @zone has experienced recent congestion, this waits for up to @timeout
799 * In the absence of zone congestion, cond_resched() is called to yield
806 long wait_iff_congested(struct zone *zone, int sync, long timeout) argument
815 * encountered in the current zone, yield if necessary instead
819 !zone_is_reclaim_congested(zone)) {
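Note: the kernel-doc above spells out wait_iff_congested()'s trick: with no recent congestion it merely yields instead of sleeping out the full timeout. A user-space sketch of that conditional-wait shape follows, with toy congestion flags and sched_yield()/nanosleep() standing in for cond_resched() and the kernel's congestion wait.

#include <sched.h>
#include <stdbool.h>
#include <time.h>

static bool bdi_congested_recently;   /* toy stand-in flags */
static bool zone_reclaim_congested;

static long wait_iff_congested(long timeout_ms)
{
	if (!bdi_congested_recently && !zone_reclaim_congested) {
		sched_yield();          /* no congestion: just be fair */
		return timeout_ms;      /* whole budget still remains */
	}

	struct timespec ts = {
		.tv_sec  = timeout_ms / 1000,
		.tv_nsec = (timeout_ms % 1000) * 1000000L,
	};
	nanosleep(&ts, NULL);           /* congested: really wait */
	return 0;
}

int main(void)
{
	zone_reclaim_congested = true;
	return (int)wait_iff_congested(10);   /* sleeps ~10ms, returns 0 */
}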
mempolicy.c
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
106 /* Highest zone. A specific allocation for a zone below that is not
139 /* Check that the nodemask contains at least one populated zone */
145 struct zone *z;
1592 struct zone *zone; local
1597 &zone);
[all...]
filemap.c
98 * ->zone.lru_lock (follow_page->mark_page_accessed)
99 * ->zone.lru_lock (check_pte_range->isolate_lru_page)
542 const struct zone *zone = page_zone(page); local
544 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
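Note: page_waitqueue() above avoids a wait queue per page by hashing the page pointer into a small per-zone table, so unrelated pages may share a queue (harmless, just a spurious wakeup). Below is a sketch of that hashed lookup; hash_ptr() here is a toy multiplicative mix, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define WAIT_TABLE_BITS 8
#define WAIT_TABLE_SIZE (1u << WAIT_TABLE_BITS)

struct waitqueue { int waiters; };
static struct waitqueue wait_table[WAIT_TABLE_SIZE];

static unsigned int hash_ptr(const void *p, unsigned int bits)
{
	uintptr_t v = (uintptr_t)p >> 4;   /* drop alignment zero bits */

	return (unsigned int)(v * 2654435761u) & ((1u << bits) - 1);
}

static struct waitqueue *page_waitqueue(const void *page)
{
	return &wait_table[hash_ptr(page, WAIT_TABLE_BITS)];
}

int main(void)
{
	int a, b;   /* any two addresses will do for the demo */

	printf("slot(a)=%td slot(b)=%td\n",
	       page_waitqueue(&a) - wait_table,
	       page_waitqueue(&b) - wait_table);
	return 0;
}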
hugetlb.c
461 struct zone *zone; local
480 for_each_zone_zonelist_nodemask(zone, z, zonelist,
482 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
483 page = dequeue_huge_page_node(h, zone_to_nid(zone));
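Note: the dequeue loop above walks the zonelist in preference order, skipping zones the cpuset disallows and taking the first zone that can satisfy the request; slab.c and slub.c below use the same shape for fallback allocation. A toy sketch of that first-fit walk:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct zone { int nid; bool allowed; int free_pages; };

static struct zone *first_usable_zone(struct zone **zonelist, size_t nr,
				      int needed)
{
	for (size_t i = 0; i < nr; i++) {
		struct zone *z = zonelist[i];

		if (!z->allowed)             /* e.g. cpuset forbids it */
			continue;
		if (z->free_pages >= needed)
			return z;            /* first fit wins */
	}
	return NULL;                         /* caller must fall back */
}

int main(void)
{
	struct zone fast = { 0, false, 100 };   /* preferred, disallowed */
	struct zone slow = { 1, true,  100 };
	struct zone *zonelist[] = { &fast, &slow };
	struct zone *z = first_usable_zone(zonelist, 2, 10);

	printf("picked node %d\n", z ? z->nid : -1);   /* node 1 */
	return 0;
}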
slab.c
2319 /* add space for red zone words */
2325 * the real object. But if the second red zone needs to be
3245 struct zone *zone; local
3262 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3263 nid = zone_to_nid(zone);
3265 if (cpuset_zone_allowed_hardwall(zone, flags) &&
slub.c
1457 struct zone *zone; local
1485 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1488 n = get_node(s, zone_to_nid(zone));
1490 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
page-writeback.c
364 struct zone *z =

Completed in 652 milliseconds