Searched defs:flags (Results 1 - 25 of 44) sorted by relevance

/mm/
balloon_compaction.c
25 unsigned long flags; local
37 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
40 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
60 unsigned long flags; local
78 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
81 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
96 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
100 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
112 unsigned long flags; local
114 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
124 unsigned long flags; local
[all...]
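
Nearly every hit in this file (and in most of the files below) is the same idiom: declare a local unsigned long flags, let spin_lock_irqsave() save the current IRQ state into it and disable interrupts, and hand the same variable back to spin_unlock_irqrestore(). A minimal, runnable userspace sketch of that shape, using a pthread mutex and the signal mask as a stand-in for IRQ state (pages_lock, lock_irqsave and unlock_irqrestore are illustrative names, not kernel API):

        #include <pthread.h>
        #include <signal.h>
        #include <stdio.h>

        static pthread_mutex_t pages_lock = PTHREAD_MUTEX_INITIALIZER;

        static void lock_irqsave(pthread_mutex_t *lock, sigset_t *flags)
        {
                sigset_t all;

                sigfillset(&all);
                /* save the old mask into 'flags' while blocking signals */
                pthread_sigmask(SIG_BLOCK, &all, flags);
                pthread_mutex_lock(lock);
        }

        static void unlock_irqrestore(pthread_mutex_t *lock, const sigset_t *flags)
        {
                pthread_mutex_unlock(lock);
                /* restore exactly the state the lock call found */
                pthread_sigmask(SIG_SETMASK, flags, NULL);
        }

        int main(void)
        {
                sigset_t flags;

                lock_irqsave(&pages_lock, &flags);
                puts("balloon page list would be manipulated here");
                unlock_irqrestore(&pages_lock, &flags);
                return 0;
        }

The point of the save/restore pair is that the critical section may be entered with interrupts either enabled or disabled; the unlock restores whichever state the lock call found.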
kmemcheck.c
8 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) argument
20 shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
71 if (s->flags & SLAB_NOTRACK)
96 if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
debug.c
53 static void dump_flags(unsigned long flags, argument
60 pr_emerg("flags: %#lx(", flags);
63 flags &= (1UL << NR_PAGEFLAGS) - 1;
65 for (i = 0; i < count && flags; i++) {
68 if ((flags & mask) != mask)
71 flags &= ~mask;
76 /* check for left over flags */
77 if (flags)
78 pr_cont("%s%#lx", delim, flags);
[all...]
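
dump_flags() above walks a table of {mask, name} pairs, prints each fully matched flag by name, clears it from the working copy, and finally prints any leftover bits numerically. A self-contained sketch of that loop, assuming invented flag names and masks:

        #include <stdio.h>

        struct flag_name { unsigned long mask; const char *name; };

        /* illustrative table; not the kernel's pageflag_names[] */
        static const struct flag_name names[] = {
                { 1UL << 0, "locked"   },
                { 1UL << 1, "dirty"    },
                { 1UL << 2, "uptodate" },
        };

        static void dump_flags(unsigned long flags)
        {
                const char *delim = "";
                size_t i;

                printf("flags: %#lx(", flags);
                for (i = 0; i < sizeof(names) / sizeof(names[0]) && flags; i++) {
                        unsigned long mask = names[i].mask;

                        if ((flags & mask) != mask)
                                continue;
                        flags &= ~mask;       /* consume the named bits */
                        printf("%s%s", delim, names[i].name);
                        delim = "|";
                }
                /* check for leftover flags with no table entry */
                if (flags)
                        printf("%s%#lx", delim, flags);
                printf(")\n");
        }

        int main(void)
        {
                dump_flags((1UL << 0) | (1UL << 2) | (1UL << 9));
                return 0;
        }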
mmzone.c
4 * management codes for pgdats, zones and page flags
103 unsigned long old_flags, flags; local
107 old_flags = flags = page->flags;
110 flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
111 flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
112 } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
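
This hit is a lockless read-modify-write: snapshot page->flags, splice the new cpupid field into a copy, and cmpxchg() the copy back, retrying if another CPU changed the word in between. A userspace analogue with C11 atomics; the LAST_CPUPID_* values are invented for the example:

        #include <stdatomic.h>
        #include <stdio.h>

        #define LAST_CPUPID_PGSHIFT 8        /* stand-in, not the kernel value */
        #define LAST_CPUPID_MASK    0xffUL

        static _Atomic unsigned long page_flags;

        static unsigned long xchg_last_cpupid(unsigned long cpupid)
        {
                unsigned long old_flags, flags;

                do {
                        old_flags = flags = atomic_load(&page_flags);
                        flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
                        flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
                        /* retry if another thread updated the word meanwhile */
                } while (!atomic_compare_exchange_weak(&page_flags, &old_flags, flags));

                return old_flags;
        }

        int main(void)
        {
                atomic_store(&page_flags, 0x1UL); /* unrelated flag bit stays set */
                xchg_last_cpupid(42);
                printf("page_flags = %#lx\n", (unsigned long)atomic_load(&page_flags));
                return 0;
        }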
mempool.c
130 unsigned long flags; local
134 spin_lock_irqsave(&pool->lock, flags);
138 spin_unlock_irqrestore(&pool->lock, flags);
140 spin_lock_irqsave(&pool->lock, flags);
145 spin_unlock_irqrestore(&pool->lock, flags);
152 spin_lock_irqsave(&pool->lock, flags);
155 spin_unlock_irqrestore(&pool->lock, flags);
166 spin_unlock_irqrestore(&pool->lock, flags);
170 spin_lock_irqsave(&pool->lock, flags);
174 spin_unlock_irqrestore(&pool->lock, flags);
201 unsigned long flags; local
277 unsigned long flags; local
[all...]
page_isolation.c
7 #include <linux/pageblock-flags.h>
15 unsigned long flags, pfn; local
22 spin_lock_irqsave(&zone->lock, flags);
69 spin_unlock_irqrestore(&zone->lock, flags);
78 unsigned long flags, nr_pages; local
85 spin_lock_irqsave(&zone->lock, flags);
124 spin_unlock_irqrestore(&zone->lock, flags);
263 unsigned long pfn, flags; local
283 spin_lock_irqsave(&zone->lock, flags);
286 spin_unlock_irqrestore(&zone->lock, flags);
[all...]
dmapool.c
321 unsigned long flags; local
328 spin_lock_irqsave(&pool->lock, flags);
335 spin_unlock_irqrestore(&pool->lock, flags);
341 spin_lock_irqsave(&pool->lock, flags);
377 spin_unlock_irqrestore(&pool->lock, flags);
407 unsigned long flags; local
410 spin_lock_irqsave(&pool->lock, flags);
413 spin_unlock_irqrestore(&pool->lock, flags);
427 spin_unlock_irqrestore(&pool->lock, flags);
445 spin_unlock_irqrestore(&pool->lock, flags);
[all...]
gup.c
20 unsigned int flags)
30 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
36 unsigned long address, pmd_t *pmd, unsigned int flags)
45 return no_page_table(vma, flags);
56 if (likely(!(flags & FOLL_MIGRATION)))
67 if ((flags & FOLL_NUMA) && pte_numa(pte))
69 if ((flags & FOLL_WRITE) && !pte_write(pte)) {
76 if ((flags & FOLL_DUMP) ||
82 if (flags & FOLL_GET)
84 if (flags
19 no_page_table(struct vm_area_struct *vma, unsigned int flags) argument
35 follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) argument
143 follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask) argument
270 faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) argument
641 int flags = FOLL_TOUCH; local
945 unsigned long next, flags; local
[all...]
highmem.c
143 #define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags)
144 #define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags)
148 #define lock_kmap_any(flags) \
149 do { spin_lock(&kmap_lock); (void)(flags); } while (0)
150 #define unlock_kmap_any(flags) \
151 do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
312 unsigned long vaddr, flags; local
314 lock_kmap_any(flags);
336 unsigned long flags; local
414 unsigned long flags; local
448 unsigned long flags; local
[all...]
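
lock_kmap_any()/unlock_kmap_any() above compile to an irq-saving lock in one configuration and to a plain spin_lock() in the other, with (void)(flags) keeping the then-unused flags variable from triggering a compiler warning. The same configuration trick in a runnable userspace form, where KMAP_NEEDS_IRQSAVE is a made-up stand-in for the kernel's config test:

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t kmap_lock = PTHREAD_MUTEX_INITIALIZER;

        #ifdef KMAP_NEEDS_IRQSAVE
        /* stand-in for spin_lock_irqsave(): pretend to save state in 'flags' */
        #define lock_kmap_any(flags) \
                do { pthread_mutex_lock(&kmap_lock); (flags) = 0; } while (0)
        #define unlock_kmap_any(flags) \
                do { (void)(flags); pthread_mutex_unlock(&kmap_lock); } while (0)
        #else
        /* plain lock; (void)(flags) silences the unused-variable warning */
        #define lock_kmap_any(flags) \
                do { pthread_mutex_lock(&kmap_lock); (void)(flags); } while (0)
        #define unlock_kmap_any(flags) \
                do { pthread_mutex_unlock(&kmap_lock); (void)(flags); } while (0)
        #endif

        int main(void)
        {
                unsigned long flags;

                lock_kmap_any(flags);
                puts("kmap table would be touched here");
                unlock_kmap_any(flags);
                return 0;
        }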
process_vm_access.c
138 * @flags: currently unused
147 unsigned long flags, int vm_write)
244 * @flags: currently unused
255 unsigned long flags, int vm_write)
264 if (flags != 0)
284 rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
297 unsigned long, riovcnt, unsigned long, flags)
299 return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
305 unsigned long, riovcnt, unsigned long, flags)
307 return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags,
144 process_vm_rw_core(pid_t pid, struct iov_iter *iter, const struct iovec *rvec, unsigned long riovcnt, unsigned long flags, int vm_write) argument
250 process_vm_rw(pid_t pid, const struct iovec __user *lvec, unsigned long liovcnt, const struct iovec __user *rvec, unsigned long riovcnt, unsigned long flags, int vm_write) argument
313 compat_process_vm_rw(compat_pid_t pid, const struct compat_iovec __user *lvec, unsigned long liovcnt, const struct compat_iovec __user *rvec, unsigned long riovcnt, unsigned long flags, int vm_write) argument
[all...]
internal.h
214 static inline bool is_cow_mapping(vm_flags_t flags) argument
216 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
257 unsigned long flags; local
260 local_irq_save(flags);
264 local_irq_restore(flags);
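
is_cow_mapping() at line 214 is a pure bit test: a mapping is copy-on-write when it may be written (VM_MAYWRITE set) but is not shared (VM_SHARED clear). A standalone version of the same predicate; the flag values are reproduced here only so the example compiles, and the two-bit test is what matters:

        #include <stdbool.h>
        #include <stdio.h>

        #define VM_SHARED   0x00000008UL
        #define VM_MAYWRITE 0x00000020UL

        static bool is_cow_mapping(unsigned long flags)
        {
                /* writable in principle (MAYWRITE) but not shared => COW */
                return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
        }

        int main(void)
        {
                printf("private writable: %d\n", is_cow_mapping(VM_MAYWRITE));
                printf("shared writable:  %d\n",
                       is_cow_mapping(VM_SHARED | VM_MAYWRITE));
                return 0;
        }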
mlock.c
668 * flags. VMAs must be already marked with the desired vm_flags, and
707 * double checks the vma flags, so that it won't mlock pages
773 static int do_mlockall(int flags) argument
777 if (flags & MCL_FUTURE)
781 if (flags == MCL_FUTURE)
788 if (flags & MCL_CURRENT)
799 SYSCALL_DEFINE1(mlockall, int, flags)
804 if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
811 if (flags
[all...]
page_cgroup.c
111 gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN; local
114 addr = alloc_pages_exact_nid(nid, size, flags);
116 kmemleak_alloc(addr, size, 1, flags);
286 * page->flags of out of node pages are not initialized. So we
412 unsigned long flags; local
417 spin_lock_irqsave(&ctrl->lock, flags);
423 spin_unlock_irqrestore(&ctrl->lock, flags);
440 unsigned long flags; local
444 spin_lock_irqsave(&ctrl->lock, flags);
447 spin_unlock_irqrestore(&ctrl->lock, flags);
[all...]
slab.h
23 unsigned long flags; /* Active flags on the slab */ member in struct:kmem_cache
69 unsigned long calculate_alignment(unsigned long flags,
82 extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
85 unsigned long flags);
87 size_t size, unsigned long flags);
93 unsigned long flags, const char *name, void (*ctor)(void *));
97 unsigned long flags, void (*ctor)(void *));
100 unsigned long flags, const char *name,
105 unsigned long flags, voi
104 __kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) argument
108 kmem_cache_flags(unsigned long object_size, unsigned long flags, const char *name, void (*ctor)(void *)) argument
[all...]
slob.c
54 * page flags. As a result, block allocations that can be satisfied from
274 unsigned long flags; local
283 spin_lock_irqsave(&slob_lock, flags);
312 spin_unlock_irqrestore(&slob_lock, flags);
322 spin_lock_irqsave(&slob_lock, flags);
330 spin_unlock_irqrestore(&slob_lock, flags);
345 unsigned long flags; local
355 spin_lock_irqsave(&slob_lock, flags);
361 spin_unlock_irqrestore(&slob_lock, flags);
419 spin_unlock_irqrestore(&slob_lock, flags);
525 __kmem_cache_create(struct kmem_cache *c, unsigned long flags) argument
535 slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node) argument
563 kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) argument
[all...]
bootmem.c
305 unsigned long eidx, int flags)
308 int exclusive = flags & BOOTMEM_EXCLUSIVE;
310 bdebug("nid=%td start=%lx end=%lx flags=%x\n",
314 flags);
330 int reserve, int flags)
334 bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
335 bdata - bootmem_node_data, start, end, reserve, flags);
344 return __reserve(bdata, sidx, eidx, flags);
351 int reserve, int flags)
369 err = mark_bootmem_node(bdata, pos, max, reserve, flags);
304 __reserve(bootmem_data_t *bdata, unsigned long sidx, unsigned long eidx, int flags) argument
328 mark_bootmem_node(bootmem_data_t *bdata, unsigned long start, unsigned long end, int reserve, int flags) argument
350 mark_bootmem(unsigned long start, unsigned long end, int reserve, int flags) argument
437 reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size, int flags) argument
458 reserve_bootmem(unsigned long addr, unsigned long size, int flags) argument
[all...]
slab_common.c
34 * Set of flags that will prevent slab merging
209 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
228 unsigned long flags, const char *name, void (*ctor)(void *))
232 if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
239 align = calculate_alignment(flags, align, size);
241 flags = kmem_cache_flags(size, flags, name, NULL);
250 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
273 * flags,
227 find_mergeable(size_t size, size_t align, unsigned long flags, const char *name, void (*ctor)(void *)) argument
275 calculate_alignment(unsigned long flags, unsigned long align, unsigned long size) argument
299 do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align, unsigned long flags, void (*ctor)(void *), struct mem_cgroup *memcg, struct kmem_cache *root_cache) argument
363 kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) argument
571 create_boot_cache(struct kmem_cache *s, const char *name, size_t size, unsigned long flags) argument
588 create_kmalloc_cache(const char *name, size_t size, unsigned long flags) argument
652 kmalloc_slab(size_t size, gfp_t flags) argument
682 create_kmalloc_caches(unsigned long flags) argument
783 kmalloc_order(size_t size, gfp_t flags, unsigned int order) argument
797 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) argument
957 __do_krealloc(const void *p, size_t new_size, gfp_t flags) argument
986 __krealloc(const void *p, size_t new_size, gfp_t flags) argument
1007 krealloc(const void *p, size_t new_size, gfp_t flags) argument
[all...]
sparse.c
471 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
472 * @map: usemap_map for pageblock flags or mmap_map for vmemmap
700 unsigned long flags; local
719 pgdat_resize_lock(pgdat, &flags);
734 pgdat_resize_unlock(pgdat, &flags);
794 unsigned long *usemap = NULL, flags; local
797 pgdat_resize_lock(pgdat, &flags);
805 pgdat_resize_unlock(pgdat, &flags);
swap.c
56 unsigned long flags; local
58 spin_lock_irqsave(&zone->lru_lock, flags);
63 spin_unlock_irqrestore(&zone->lru_lock, flags);
114 * flags after the head page flags. The
161 unsigned long flags; local
169 flags = compound_lock_irqsave(page_head);
172 compound_unlock_irqrestore(page_head, flags);
210 compound_unlock_irqrestore(page_head, flags);
287 unsigned long flags; local
417 unsigned long flags = 0; local
473 unsigned long flags; local
806 unsigned long flags; local
[all...]
compaction.c
234 static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags, argument
238 if (!spin_trylock_irqsave(lock, *flags)) {
243 spin_lock_irqsave(lock, *flags);
265 unsigned long flags, bool *locked, struct compact_control *cc)
268 spin_unlock_irqrestore(lock, flags);
347 unsigned long flags = 0; local
364 && compact_unlock_should_abort(&cc->zone->lock, flags,
394 &flags, cc);
440 spin_unlock_irqrestore(&cc->zone->lock, flags);
583 unsigned long flags; local
264 compact_unlock_should_abort(spinlock_t *lock, unsigned long flags, bool *locked, struct compact_control *cc) argument
[all...]
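
compact_trylock_irqsave() above tries the lock first and records contention before falling back to a blocking acquire; compact_unlock_should_abort() is its unlock-side twin. A hedged userspace sketch of the trylock-then-note-contention half, with a pthread mutex and a bool standing in for cc->contended; the kernel version can also abort compaction instead of blocking, which this sketch omits:

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

        static void lock_noting_contention(pthread_mutex_t *lock, bool *contended)
        {
                if (pthread_mutex_trylock(lock) != 0) {
                        *contended = true;        /* someone else holds the lock */
                        pthread_mutex_lock(lock); /* fall back to blocking acquire */
                }
        }

        int main(void)
        {
                bool contended = false;

                lock_noting_contention(&zone_lock, &contended);
                printf("contended=%d\n", contended);
                pthread_mutex_unlock(&zone_lock);
                return 0;
        }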
filemap.c
248 if (test_bit(AS_ENOSPC, &mapping->flags) &&
249 test_and_clear_bit(AS_ENOSPC, &mapping->flags))
251 if (test_bit(AS_EIO, &mapping->flags) &&
252 test_and_clear_bit(AS_EIO, &mapping->flags))
683 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
685 if (test_bit(bit_nr, &page->flags))
693 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
695 if (!test_bit(bit_nr, &page->flags))
705 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
708 if (!test_bit(bit_nr, &page->flags))
725 unsigned long flags; local
838 __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags) argument
2343 pagecache_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) argument
2436 grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index, unsigned flags) argument
2462 unsigned int flags = 0; local
[all...]
memblock.c
272 type->regions[0].flags = 0;
440 this->flags != next->flags) {
460 * @flags: flags of the new region
468 int nid, unsigned long flags)
476 rgn->flags = flags;
488 * @flags: flags o
465 memblock_insert_region(struct memblock_type *type, int idx, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
498 memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
697 memblock_reserve_region(phys_addr_t base, phys_addr_t size, int nid, unsigned long flags) argument
1497 unsigned long flags; local
[all...]
memory-failure.c
40 #include <linux/page-flags.h>
41 #include <linux/kernel-page-flags.h>
191 unsigned long pfn, struct page *page, int flags)
207 if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
348 int flags)
373 pfn, page, flags) < 0)
396 if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
546 * Clear sensible page flags, so that the buddy system won't
766 * A page state is defined by its current page->flags bit
190 kill_proc(struct task_struct *t, unsigned long addr, int trapno, unsigned long pfn, struct page *page, int flags) argument
346 kill_procs(struct list_head *to_kill, int forcekill, int trapno, int fail, struct page *page, unsigned long pfn, int flags) argument
887 hwpoison_user_mappings(struct page *p, unsigned long pfn, int trapno, int flags, struct page **hpagep) argument
1065 memory_failure(unsigned long pfn, int trapno, int flags) argument
1294 int flags; member in struct:memory_failure_entry
1323 memory_failure_queue(unsigned long pfn, int trapno, int flags) argument
1481 __get_any_page(struct page *p, unsigned long pfn, int flags) argument
1511 get_any_page(struct page *page, unsigned long pfn, int flags) argument
1535 soft_offline_huge_page(struct page *page, int flags) argument
1585 __soft_offline_page(struct page *page, int flags) argument
1700 soft_offline_page(struct page *page, int flags) argument
[all...]
memory_hotplug.c
139 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
351 unsigned long flags; local
358 pgdat_resize_lock(z1->zone_pgdat, &flags);
379 pgdat_resize_unlock(z1->zone_pgdat, &flags);
385 pgdat_resize_unlock(z1->zone_pgdat, &flags);
393 unsigned long flags; local
400 pgdat_resize_lock(z1->zone_pgdat, &flags);
421 pgdat_resize_unlock(z1->zone_pgdat, &flags);
427 pgdat_resize_unlock(z1->zone_pgdat, &flags);
449 unsigned long flags; local
714 unsigned long flags; local
964 unsigned long flags; local
1673 unsigned long flags; local
[all...]
migrate.c
821 if (current->flags & PF_MEMALLOC)
1119 int swapwrite = current->flags & PF_SWAPWRITE;
1123 current->flags |= PF_SWAPWRITE;
1169 current->flags &= ~PF_SWAPWRITE;
1304 int __user *status, int flags)
1364 flags & MPOL_MF_MOVE_ALL);
1466 int __user *, status, int, flags)
1474 /* Check flags */
1475 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1478 if ((flags
1300 do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, unsigned long nr_pages, const void __user * __user *pages, const int __user *nodes, int __user *status, int flags) argument
1536 migrate_vmas(struct mm_struct *mm, const nodemask_t *to, const nodemask_t *from, unsigned long flags) argument
[all...]

Completed in 1647 milliseconds
