Occurrences of the symbol "order" under mm/:
mm/kmemcheck.c
      7  void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)   [argument]
     13  pages = 1 << order;
     19  shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
     38  void kmemcheck_free_shadow(struct page *page, int order)   [argument]
     47  pages = 1 << order;
     56  __free_pages(shadow, order);
     99  void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,   [argument]
    107  pages = 1 << order;
    116  kmemcheck_alloc_shadow(page, order, gfpflags, -1);
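
The pages = 1 << order idiom that recurs throughout these hits is the core convention: an order-n allocation is 2^n physically contiguous pages. A minimal standalone C sketch of the arithmetic (the PAGE_SIZE of 4096 is an assumption; it is architecture-dependent in the kernel):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* assumed; varies by architecture */

    int main(void)
    {
        for (unsigned int order = 0; order <= 4; order++) {
            unsigned long pages = 1UL << order;  /* same idiom as the hits above */
            printf("order %u -> %lu pages, %lu bytes\n",
                   order, pages, pages * PAGE_SIZE);
        }
        return 0;
    }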
mm/mempool.c
    355  * of the order specified by pool_data.
    359  int order = (int)(long)pool_data;   [local]
    360  return alloc_pages(gfp_mask, order);
    366  int order = (int)(long)pool_data;   [local]
    367  __free_pages(element, order);
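
mempool_alloc_pages() and mempool_free_pages() recover the page order from pool_data, so a pool of fixed-order page blocks needs no dedicated allocator. A hedged kernel-style sketch of how such a pool is typically wired up (the pool depth of 16 and order 2 are arbitrary illustration values):

    #include <linux/mempool.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static mempool_t *page_pool;

    static int example_pool_init(void)
    {
            /* Reserve 16 order-2 blocks (4 pages each); the order rides
             * along in pool_data as a casted integer, exactly as the
             * hits at lines 359 and 366 decode it. */
            page_pool = mempool_create(16, mempool_alloc_pages,
                                       mempool_free_pages, (void *)(long)2);
            return page_pool ? 0 : -ENOMEM;
    }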
mm/nobootmem.c
     87  int order = ilog2(BITS_PER_LONG);   [local]
    103  __free_pages_bootmem(pfn_to_page(i), order);   [local]
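
ilog2(BITS_PER_LONG) picks the order whose block is exactly one bitmap word's worth of pages, so a fully free word can be handed back in a single high-order free. A tiny standalone check of that equivalence (assuming a 64-bit long, with __builtin_ctz as a userspace stand-in for the kernel's ilog2 on a power of two):

    #include <stdio.h>

    #define BITS_PER_LONG 64  /* assumed 64-bit target */

    int main(void)
    {
        unsigned int order = __builtin_ctz(BITS_PER_LONG); /* ilog2 of a power of two */
        printf("order %u frees %d pages per call\n", order, 1 << order);
        /* prints: order 6 frees 64 pages per call */
        return 0;
    }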
mm/oom_kill.c
    419  static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,   [argument]
    423  pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
    425  current->comm, gfp_mask, order, current->signal->oom_adj,
    485  static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,   [argument]
    496  dump_header(p, gfp_mask, order, memcg, nodemask);
    542  check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, int order, const nodemask_t *nodemask)   [argument]
    557  dump_header(NULL, gfp_mask, order, NULL, nodemask);
    701  * @order: amount of memory being requested as a power of 2
    709  out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order, nodemask_t *nodemask)   [argument]
    742  check_panic_on_oom(constraint, gfp_mask, order, mpol_mas
    [all...]
mm/slob.c
     24  * into the free list in address order, so this is effectively an
     30  * alloc_pages() directly, allocating compound pages so the page order
     57  * in order to prevent random node placement.
    243  static void *slob_new_pages(gfp_t gfp, int order, int node)   [argument]
    249  page = alloc_pages_exact_node(node, gfp, order);
    252  page = alloc_pages(gfp, order);
    260  static void slob_free_pages(void *b, int order)   [argument]
    263  current->reclaim_state->reclaimed_slab += 1 << order;
    264  free_pages((unsigned long)b, order);
    503  unsigned int order   [local]
    [all...]
mm/bootmem.c
     76  * link bdata in order
    198  int order = ilog2(BITS_PER_LONG);   [local]
    200  __free_pages_bootmem(pfn_to_page(start), order);   [local]
mm/compaction.c
     38  unsigned int order;  /* order a direct compactor needs */   [member of struct compact_control]
     89  /* Found a free page, break it into order-0 pages */
    455  unsigned int order;   [local]
    466  * order == -1 is expected when compacting via
    469  if (cc->order == -1)
    474  watermark += (1 << cc->order);
    476  if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
    480  for (order = cc->order; orde
    500  compaction_suitable(struct zone *zone, int order)   [argument]
    609  compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, bool sync)   [argument]
    639  try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, bool sync)   [argument]
    [all...]
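
The checks at lines 469-476 encode when compaction is worth attempting: order == -1 (explicit compaction via /proc) always proceeds, otherwise the zone must already hold its low watermark plus 2^order free pages, since compaction consumes order-0 pages while migrating. A simplified standalone restatement of that test (the function and parameter names here are illustrative, not the kernel's):

    #include <stdbool.h>

    /* Illustrative restatement of the lines 469-476 test: compaction
     * for an order-N request is only suitable if free memory covers
     * the low watermark plus the 2^N pages the request itself needs. */
    static bool compaction_worthwhile(long free_pages, long low_watermark, int order)
    {
        if (order == -1)   /* explicit request: always run */
            return true;
        return free_pages >= low_watermark + (1L << order);
    }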
mm/memory_hotplug.c
    682  int order;   [local]
    684  order = page_order(page);
    685  if ((order < MAX_ORDER) && (order >= pageblock_order))
    686  return page + (1 << order);
mm/vmstat.c
    547  unsigned int order;   [local]
    553  for (order = 0; order < MAX_ORDER; order++) {
    557  blocks = zone->free_area[order].nr_free;
    561  info->free_pages += blocks << order;
    564  if (order >= suitable_order)
    566  (order - suitable_order);
    577  static int __fragmentation_index(unsigned int order, struct contig_page_info *info)   [argument]
    579  unsigned long requested = 1UL << order;
    598  fragmentation_index(struct zone *zone, unsigned int order)   [argument]
    799  int order;   [local]
    820  int order, mtype;   [local]
    845  int order;   [local]
   1229  unusable_free_index(unsigned int order, struct contig_page_info *info)   [argument]
   1250  unsigned int order;   [local]
   1310  unsigned int order;   [local]
    [all...]
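
__fragmentation_index() turns the free-list summary gathered above into a score: -1000 when a suitable block already exists, near 0 when a failed request reflects a plain shortage of memory, and approaching 1000 when plenty of pages are free but scattered in blocks too small. A userspace transcription of that computation (struct and field names mirror the hits; the kernel's div_u64 calls become plain division):

    /* Userspace transcription of the fragmentation-index computation
     * the hits above outline; values are scaled by 1000 as in vmstat.c. */
    struct contig_page_info {
        unsigned long free_pages;           /* total free pages in the zone */
        unsigned long free_blocks_total;    /* free blocks of any order */
        unsigned long free_blocks_suitable; /* free blocks >= requested order */
    };

    static int fragmentation_index(unsigned int order,
                                   const struct contig_page_info *info)
    {
        unsigned long requested = 1UL << order;

        if (!info->free_blocks_total)
            return 0;           /* no free memory at all */
        if (info->free_blocks_suitable)
            return -1000;       /* the request would succeed outright */

        /* Low score: failure is due to lack of memory.
         * High score: failure is due to fragmentation. */
        return 1000 - (long)((1000 + (info->free_pages * 1000ULL) / requested)
                             / info->free_blocks_total);
    }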
mm/mempolicy.c
   1048  * is in virtual address order.
   1806  static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,   [argument]
   1813  page = __alloc_pages(gfp, order, zl);
   1829  * @order:Order of the GFP allocation.
   1843  alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,   [argument]
   1854  nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
   1856  page = alloc_page_interleave(gfp, order, nid);
   1865  struct page *page = __alloc_pages_nodemask(gfp, order,
   1874  page = __alloc_pages_nodemask(gfp, order, zl,
   1889  * @order
   1899  alloc_pages_current(gfp_t gfp, unsigned order)   [argument]
    [all...]
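
The PAGE_SHIFT + order shift at line 1854 is what keeps interleaving stable for higher-order allocations: the address is binned in units of the allocation size, so each 2^order-page chunk rotates across nodes rather than each page. A simplified standalone illustration (the plain modulo placement is a simplification of the kernel's weighted node-mask lookup in interleave_nid()):

    #define PAGE_SHIFT 12  /* assumed 4 KiB base pages */

    /* Simplified sketch: bin an address in allocation-sized units and
     * rotate bins across nodes. */
    static int interleave_node(unsigned long addr, int order, int nr_nodes)
    {
        unsigned long bin = addr >> (PAGE_SHIFT + order);
        return bin % nr_nodes;
    }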
mm/nommu.c
    108  * region. This test is intentionally done in reverse order,
    110  * PAGE_SIZE for 0-order pages.
    713  /* sort by: start addr, end addr, VMA struct addr in that order
   1130  int ret, order;   [local]
   1157  order = get_order(len);
   1158  kdebug("alloc order %d for %lx", order, len);
   1160  pages = alloc_pages(GFP_KERNEL, order);
   1164  total = 1 << order;
   1173  order
    [all...]
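
get_order(len) at line 1157 computes the smallest order whose block covers len bytes; line 1164 then derives how many pages that over-allocates, which the nommu path later trims back. A standalone equivalent of that mapping (assuming 4 KiB pages; a naive loop where the kernel uses bit operations):

    #define PAGE_SHIFT 12  /* assumed */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Smallest order such that (PAGE_SIZE << order) >= size. */
    static int get_order_equiv(unsigned long size)
    {
        int order = 0;
        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }
    /* get_order_equiv(4096) == 0, get_order_equiv(4097) == 1,
     * get_order_equiv(20000) == 3 (8 pages for a ~5-page request). */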
mm/vmalloc.c
    516  * All the lazy freeing logic is still retained, in order to
    902  unsigned int order;   [local]
    907  order = get_order(size);
    916  if (vb->free < 1UL << order)
    920  VMAP_BBMAP_BITS, order);
    933  vb->free -= 1UL << order;
    965  unsigned int order;   [local]
    973  order = get_order(size);
    986  BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
    988  vb->dirty += 1UL << order;
   1574  const int order = 0;   [local]
    [all...]
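
Within a vmap block, order-sized chunks are carved out of a bitmap in units of 2^order bits (lines 916-988); the lib/bitmap region helpers do the find-and-set. A hedged kernel-style sketch of that pattern (the map size and function names are illustrative):

    #include <linux/bitmap.h>

    #define CHUNK_BITS 1024  /* illustrative; vmalloc.c uses VMAP_BBMAP_BITS */

    static DECLARE_BITMAP(alloc_map, CHUNK_BITS);

    /* Carve a 2^order-slot chunk out of the map, as the vmap-block
     * code above does with its alloc_map/dirty_map pair. Returns the
     * starting bit position, or a negative errno if none is free. */
    static int grab_chunk(int order)
    {
            return bitmap_find_free_region(alloc_map, CHUNK_BITS, order);
    }

    static void put_chunk(int pos, int order)
    {
            bitmap_release_region(alloc_map, pos, order);
    }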
mm/hugetlb.c
    245  return 1UL << (hstate->order + PAGE_SHIFT);
    501  VM_BUG_ON(h->order >= MAX_ORDER);
    568  static void prep_compound_gigantic_page(struct page *page, unsigned long order)   [argument]
    571  int nr_pages = 1 << order;
    575  set_compound_order(page, order);
    602  if (h->order >= MAX_ORDER)
    756  if (h->order >= MAX_ORDER)
    947  if (h->order >= MAX_ORDER)
   1095  static void prep_compound_huge_page(struct page *page, int order)   [argument]
   1097  if (unlikely(order > (MAX_ORDE
   1802  hugetlb_add_hstate(unsigned order)   [argument]
    [all...]
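
Line 245 is the defining relationship for an hstate: the huge page size is simply the base page size shifted by the hstate's order. A quick standalone check (x86-64 values assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumed 4 KiB base pages (x86-64) */

    int main(void)
    {
        /* order 9 -> 2 MiB, order 18 -> 1 GiB, matching x86-64 huge pages */
        for (unsigned int order = 9; order <= 18; order += 9)
            printf("order %2u -> %lu KiB\n",
                   order, (1UL << (order + PAGE_SHIFT)) / 1024);
        return 0;
    }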
mm/vmscan.c
     58  * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
     61  * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
     64  * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
     65  * order-0 pages and then compact the zone
     97  int order;   [member of struct scan_control]
    101  * enough amount of memory. i.e, mode for high order allocation.
    374  * reclaim/compaction.Depending on the order, we will either set the
    375  * sync mode or just reclaim order-0 pages later.
    387  if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
    389  else if (sc->order
   1148  isolate_lru_pages(unsigned long nr_to_scan, struct mem_cgroup_zone *mz, struct list_head *dst, unsigned long *nr_scanned, int order, isolate_mode_t mode, int active, int file)   [argument]
   2428  try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask)   [argument]
   2596  sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, int classzone_idx)   [argument]
   2664  balance_pgdat(pg_data_t *pgdat, int order, int *classzone_idx)   [argument]
   2939  kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)   [argument]
   2998  unsigned long order, new_order;   [local]
   3090  wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)   [argument]
   3339  __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)   [argument]
   3424  zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)   [argument]
    [all...]
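
Lines 387-389 pick the reclaim mode from the requested order: above PAGE_ALLOC_COSTLY_ORDER, reclaim switches to the synchronous lumpy/compaction modes described in the comments at the top of the file. A simplified restatement (not the kernel's exact flag plumbing; the second condition is abstracted as an "under pressure" boolean):

    #define PAGE_ALLOC_COSTLY_ORDER 3  /* as defined in include/linux/mmzone.h */

    /* Simplified restatement of the order test at lines 387-389:
     * costly high-order requests (and smaller high-order ones under
     * sustained pressure) enable synchronous lumpy/compaction reclaim;
     * otherwise plain order-0 page reclaim is used. */
    static int want_lumpy_sync(int order, int under_pressure)
    {
        if (order > PAGE_ALLOC_COSTLY_ORDER)
            return 1;
        if (order && under_pressure)
            return 1;
        return 0;
    }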
mm/slab.c
     36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
    484  * Do not go above this order unless 0 objects fit into the slab or
   1503  int order;   [local]
   1566  for (order = 0; order < MAX_ORDER; order++) {
   1567  cache_estimate(order, cache_cache.buffer_size,
   1573  cache_cache.gfporder = order;
   1750  * requires __GFP_COMP to properly refcount higher order allocations
   2077  * calculate_slab_order - calculate size (page order) o
    [all...]
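
The loop at lines 1566-1573 is the standard sizing pass: walk the orders from 0 upward, estimate how many objects fit in a slab of that size, and stop at the first order that holds anything. A simplified standalone estimator in the same spirit (it ignores the slab management overhead that cache_estimate() accounts for):

    #define PAGE_SIZE 4096UL  /* assumed */
    #define MAX_ORDER 11

    /* First order whose slab can hold at least one object; returns
     * the object count through *num, as cache_estimate() does. */
    static int pick_slab_order(unsigned long obj_size, unsigned long *num)
    {
        for (int order = 0; order < MAX_ORDER; order++) {
            *num = (PAGE_SIZE << order) / obj_size;  /* overhead ignored */
            if (*num)
                return order;
        }
        return -1;  /* object larger than any slab */
    }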
mm/memcontrol.c
    770  * Check events in order.
   3504  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,   [argument]
   3516  if (order > 0)
mm/page_alloc.c
    114  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
    152  static void __free_pages_ok(struct page *page, unsigned int order);
    325  * Higher-order pages are called "compound pages". They are structured thusly:
    335  * put_page() function. Its ->lru.prev holds the order of allocation.
    336  * This usage means that zero-order pages may not be compound.
    344  void prep_compound_page(struct page *page, unsigned long order)   [argument]
    347  int nr_pages = 1 << order;
    350  set_compound_order(page, order);
    361  static int destroy_compound_page(struct page *page, unsigned long order)   [argument]
    364  int nr_pages = 1 << order;
    388  prep_zero_page(struct page *page, int order, gfp_t gfp_flags)   [argument]
    432  set_page_order(struct page *page, int order)   [argument]
    462  __find_buddy_index(unsigned long page_idx, unsigned int order)   [argument]
    480  page_is_buddy(struct page *page, struct page *buddy, int order)   [argument]
    525  __free_one_page(struct page *page, struct zone *zone, unsigned int order, int migratetype)   [argument]
    679  free_one_page(struct zone *zone, struct page *page, int order, int migratetype)   [argument]
    691  free_pages_prepare(struct page *page, unsigned int order)   [argument]
    707  debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);   [local]
    709  PAGE_SIZE << order);   [local]
    717  __free_pages_ok(struct page *page, unsigned int order)   [argument]
    729  free_one_page(page_zone(page), page, order,   [local]
    734  __free_pages_bootmem(struct page *page, unsigned int order)   [argument]
    818  prep_new_page(struct page *page, int order, gfp_t gfp_flags)   [argument]
    848  __rmqueue_smallest(struct zone *zone, unsigned int order, int migratetype)   [argument]
    895  unsigned long order;   [local]
    967  __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)   [argument]
   1039  __rmqueue(struct zone *zone, unsigned int order, int migratetype)   [argument]
   1070  rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, int cold)   [argument]
   1178  int order, t;   [local]
   1281  split_page(struct page *page, unsigned int order)   [argument]
   1294  split_page(virt_to_page(page[0].shadow), order);   [local]
   1313  unsigned int order;   [local]
   1352  buffered_rmqueue(struct zone *preferred_zone, struct zone *zone, int order, gfp_t gfp_flags, int migratetype)   [argument]
   1453  should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)   [argument]
   1502  should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)   [argument]
   1513  __zone_watermark_ok(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags, long free_pages)   [argument]
   1541  zone_watermark_ok(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags)   [argument]
   1548  zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags)   [argument]
   1704  get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, struct zonelist *zonelist, int high_zoneidx, int alloc_flags, struct zone *preferred_zone, int migratetype)   [argument]
   1844  warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)   [argument]
   1887  should_alloc_retry(gfp_t gfp_mask, unsigned int order, unsigned long did_some_progress, unsigned long pages_reclaimed)   [argument]
   1929  __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, struct zone *preferred_zone, int migratetype)   [argument]
   1982  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, bool sync_migration, bool *deferred_compaction, unsigned long *did_some_progress)   [argument]
   2041  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, bool sync_migration, bool *deferred_compaction, unsigned long *did_some_progress)   [argument]
   2054  __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, unsigned long *did_some_progress)   [argument]
   2111  __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, struct zone *preferred_zone, int migratetype)   [argument]
   2131  wake_all_kswapd(unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, enum zone_type classzone_idx)   [argument]
   2185  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, struct zone *preferred_zone, int migratetype)   [argument]
   2377  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, nodemask_t *nodemask)   [argument]
   2430  __get_free_pages(gfp_t gfp_mask, unsigned int order)   [argument]
   2453  __free_pages(struct page *page, unsigned int order)   [argument]
   2465  free_pages(unsigned long addr, unsigned int order)   [argument]
   2469  __free_pages(virt_to_page((void *)addr), order);   [local]
   2475  make_alloc_exact(unsigned long addr, unsigned order, size_t size)   [argument]
   2481  split_page(virt_to_page((void *)addr), order);   [local]
   2505  unsigned int order = get_order(size);   [local]
   2527  unsigned order = get_order(size);   [local]
   2772  unsigned long nr[MAX_ORDER], flags, order, total = 0;   [local]
   3140  int order = current_zonelist_order;   [local]
   3627  int order, t;   [local]
   4215  set_pageblock_order(unsigned int order)   [argument]
   4235  pageblock_default_order(unsigned int order)   [argument]
   5519  int order, i;   [local]
   5563  int order;   [local]
    [all...]
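
Most of this file is the buddy allocator itself, and __find_buddy_index() (line 462) is its central trick: a block's buddy at a given order is found by flipping one bit of its page index, so merge and split partners can be computed without any search. A runnable demonstration of that identity:

    #include <stdio.h>

    /* Same computation as __find_buddy_index(): flip bit 'order' of
     * the (order-aligned) page index to get the buddy block's index. */
    static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
    {
        return page_idx ^ (1UL << order);
    }

    int main(void)
    {
        /* Blocks 0 and 4 are order-2 buddies; merging them yields the
         * order-3 block starting at index 0 (idx & ~(1UL << order)). */
        printf("buddy of 0 at order 2: %lu\n", find_buddy_index(0, 2));  /* 4 */
        printf("buddy of 4 at order 2: %lu\n", find_buddy_index(4, 2));  /* 0 */
        printf("buddy of 8 at order 1: %lu\n", find_buddy_index(8, 1));  /* 10 */
        return 0;
    }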
mm/slub.c
     36  * Lock order:
     70  * Interrupts are disabled during allocation and deallocation in order to
    155  * disabled when slub_debug=O is used and a cache's min order increases with
    324  static inline int order_objects(int order, unsigned long size, int reserved)   [argument]
    326  return ((PAGE_SIZE << order) - reserved) / size;
    329  static inline struct kmem_cache_order_objects oo_make(int order,   [argument]
    333  (order << OO_SHIFT) + order_objects(order, size, reserved)
    955  * So in order to make the debug calls that expect irqs to be
   1015  * May be called early in order t
   1269  int order = oo_order(oo);   [local]
   1391  int order = compound_order(page);   [local]
   1438  int order = compound_order(page);   [local]
   2362  kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)   [argument]
   2637  int order;   [local]
   2665  int order;   [local]
   2873  int order;   [local]
   3673  int order;   [local]
   4178  int order;   [local]
   4606  unsigned long order;   [local]
   4624  SLAB_ATTR(order);   [variable]
    [all...]
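
order_objects() and oo_make() (lines 324-333) pack an order and its object count into one word so the pair can be read and swapped atomically; OO_SHIFT is 16 in slub.c, so the low half holds the count. A standalone replay of that encoding (PAGE_SIZE assumed; OO_SHIFT/OO_MASK values as in slub.c):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* assumed */
    #define OO_SHIFT  16
    #define OO_MASK   ((1 << OO_SHIFT) - 1)

    /* Objects that fit in an order-'order' slab, minus reserved bytes;
     * mirrors order_objects() at line 326. */
    static unsigned int order_objects(int order, unsigned long size, int reserved)
    {
        return ((PAGE_SIZE << order) - reserved) / size;
    }

    int main(void)
    {
        int order = 3;
        unsigned long size = 256;
        unsigned int packed = (order << OO_SHIFT) + order_objects(order, size, 0);

        /* Unpack as oo_order()/oo_objects() do: order=3 objects=128 */
        printf("order=%u objects=%u\n",
               packed >> OO_SHIFT, packed & OO_MASK);
        return 0;
    }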