Searched defs:batch (Results 1 - 3 of 3) sorted by relevance
/mm/ |
H A D | memory.c | 200 struct mmu_gather_batch *batch; local 202 batch = tlb->active; 203 if (batch->next) { 204 tlb->active = batch->next; 208 batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); 209 if (!batch) 212 batch->next = NULL; 213 batch->nr = 0; 214 batch->max = MAX_GATHER_BATCH; 216 tlb->active->next = batch; 246 struct mmu_gather_batch *batch; local 272 struct mmu_gather_batch *batch, *next; local 294 struct mmu_gather_batch *batch; local 343 struct mmu_table_batch *batch; local 356 struct mmu_table_batch **batch = &tlb->batch; local 366 struct mmu_table_batch **batch = &tlb->batch; local [all...] |
H A D | memcontrol.c | 2144 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch 2215 unsigned int batch = max(CHARGE_BATCH, nr_pages); local 2303 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check); 2308 batch = nr_pages; 2329 if (batch > nr_pages) 2330 refill_stock(memcg, batch - nr_pages); 2835 struct memcg_batch_info *batch = NULL; local 2842 batch = &current->memcg_batch; 2848 if (!batch->memcg) 2849 batch 3004 struct memcg_batch_info *batch = &current->memcg_batch; local [all...] |
H A D | page_alloc.c | 1118 if (pcp->count >= pcp->batch) 1119 to_drain = pcp->batch; 1252 free_pcppages_bulk(zone, pcp->batch, pcp); 1253 pcp->count -= pcp->batch; 1370 pcp->batch, list, 2674 pageset->pcp.batch, pageset->pcp.count); 3291 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 3642 int batch; local 3650 batch = zone->present_pages / 1024; 3651 if (batch * PAGE_SIZ 3689 setup_pageset(struct per_cpu_pageset *p, unsigned long batch) argument 3798 unsigned long batch = zone_batchsize(zone), flags; local [all...] |
Completed in 438 milliseconds