Searched defs:gfp (Results 1 - 9 of 9) sorted by relevance

/mm/
percpu-vm.c
111 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; local
119 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
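Note: the percpu allocator asks for node-local, highmem-capable, cache-cold pages with the fixed mask at line 111. A minimal sketch of the same pattern follows; demo_alloc_percpu_pages() and its error handling are illustrative, not the kernel's own code.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/topology.h>

/* Illustrative: one order-0 page near each possible CPU, using the
 * exact mask from line 111 above. A real caller would unwind the
 * already-allocated pages on failure. */
static int demo_alloc_percpu_pages(struct page **pages)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		pages[cpu] = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
		if (!pages[cpu])
			return -ENOMEM;
	}
	return 0;
}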
util.c
17 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
19 char *kstrdup(const char *s, gfp_t gfp) argument
28 buf = kmalloc_track_caller(len, gfp);
39 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
41 char *kstrndup(const char *s, size_t max, gfp_t gfp) argument
50 buf = kmalloc_track_caller(len+1, gfp);
64 * @gfp: GFP mask to use
66 void *kmemdup(const void *src, size_t len, gfp_t gfp) argument
70 p = kmalloc_track_caller(len, gfp);
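All three helpers simply forward the caller's mask to the slab allocator, so the usual context rules apply: GFP_KERNEL may sleep, GFP_ATOMIC may not. A hedged usage sketch with demo-named identifiers:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: duplicate a name and a small blob in process
 * context, where a sleeping GFP_KERNEL allocation is appropriate. */
static int demo_dup(const char *name, const void *blob, size_t len)
{
	char *n = kstrdup(name, GFP_KERNEL);
	void *b = kmemdup(blob, len, GFP_KERNEL);

	if (!n || !b) {
		kfree(n);	/* kfree(NULL) is a no-op */
		kfree(b);
		return -ENOMEM;
	}
	/* ... use n and b ... */
	kfree(n);
	kfree(b);
	return 0;
}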
slob.c
243 static void *slob_new_pages(gfp_t gfp, int order, int node) argument
249 page = alloc_pages_exact_node(node, gfp, order);
252 page = alloc_pages(gfp, order);
321 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) argument
369 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
385 if (unlikely((gfp & __GFP_ZERO) && b))
479 void *__kmalloc_node(size_t size, gfp_t gfp, int node) argument
485 lockdep_trace_alloc(gfp);
491 m = slob_alloc(size + align, gfp, align, node);
499 size, size + align, gfp, node
[all...]
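Worth noting in the hits above: SLOB masks __GFP_ZERO off before the raw page allocation (line 369) and honours it with an explicit clear once the object exists (line 385). A condensed sketch of that pattern; demo_new_pages() is a stand-in for the static slob_new_pages():

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/string.h>

static void *demo_new_pages(gfp_t gfp, int order, int node); /* stands in for slob_new_pages() */

static void *demo_slob_alloc(size_t size, gfp_t gfp, int node)
{
	/* allocate without __GFP_ZERO: the page allocator should not
	 * zero a whole page when only the object needs clearing */
	void *b = demo_new_pages(gfp & ~__GFP_ZERO, 0, node);

	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}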
rmap.c
114 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) argument
116 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
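anon_vma_chain_alloc() is the common thin-wrapper idiom: a dedicated kmem_cache plus a pass-through gfp parameter so each caller decides the allocation context. A sketch with a hypothetical cache:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *demo_cachep;	/* hypothetical cache */

static void *demo_chain_alloc(gfp_t gfp)
{
	/* forward the caller's mask, as anon_vma_chain_alloc() does */
	return kmem_cache_alloc(demo_cachep, gfp);
}

static int __init demo_init(void)
{
	demo_cachep = kmem_cache_create("demo_chain", 64, 0, 0, NULL);
	return demo_cachep ? 0 : -ENOMEM;
}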
filemap.c
19 #include <linux/gfp.h>
513 struct page *__page_cache_alloc(gfp_t gfp) argument
521 page = alloc_pages_exact_node(n, gfp, 0);
525 return alloc_pages(gfp, 0);
1786 gfp_t gfp)
1793 page = __page_cache_alloc(gfp | __GFP_COLD);
1796 err = add_to_page_cache_lru(page, mapping, index, gfp);
1817 gfp_t gfp)
1824 page = __read_cache_page(mapping, index, filler, data, gfp);
1890 * @gfp
1782 __read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *,struct page*), void *data, gfp_t gfp) argument
1813 do_read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *,struct page*), void *data, gfp_t gfp) argument
1897 read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) argument
[all...]
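The hits trace gfp from __page_cache_alloc() up to the exported read_cache_page_gfp() at line 1897, which lets a caller override the mapping's default allocation mask. An illustrative caller (the mask choice here is an example, not a rule):

#include <linux/err.h>
#include <linux/pagemap.h>

/* Read one page of a file through the page cache with an explicit
 * mask; failure comes back as an ERR_PTR(), success as a referenced
 * page the caller must eventually release. */
static struct page *demo_read_page(struct address_space *mapping,
				   pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) & ~__GFP_FS;	/* e.g. avoid fs recursion */

	return read_cache_page_gfp(mapping, index, gfp);
}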
kmemleak.c
116 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
510 int min_count, gfp_t gfp)
516 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
727 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) argument
740 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
858 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
864 gfp_t gfp)
869 create_object((unsigned long)ptr, size, min_count, gfp);
956 * @gfp
509 create_object(unsigned long ptr, size_t size, int min_count, gfp_t gfp) argument
863 kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) argument
962 kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) argument
[all...]
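gfp_kmemleak_mask() (line 116; its continuation is truncated in the listing) clamps kmemleak's own metadata allocations: only bits also present in GFP_KERNEL or GFP_ATOMIC survive, so flags such as __GFP_NOFAIL or __GFP_HIGHMEM from the tracked allocation are never inherited. A toy equivalent, assuming the truncated tail merely ORs in allocator-softening flags:

#include <linux/gfp.h>

/* Toy version of the clamp at line 116; __GFP_NOWARN stands in for
 * whatever softening flags the truncated continuation adds. */
#define demo_kmemleak_mask(gfp) \
	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | __GFP_NOWARN)

/* demo_kmemleak_mask(GFP_NOFS) keeps GFP_NOFS's bits (a subset of
 * GFP_KERNEL), so the metadata allocation inherits its caller's
 * no-filesystem-reclaim constraint. */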
mempolicy.c
1511 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) argument
1515 gfp_zone(gfp) >= policy_zone &&
1522 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1523 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, argument
1538 if (unlikely(gfp & __GFP_THISNODE) &&
1545 return node_zonelist(nd, gfp);
1780 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, argument
1786 zl = node_zonelist(nid, gfp);
1787 page = __alloc_pages(gfp, order, zl);
1796 * @gfp
1817 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, int node) argument
1873 alloc_pages_current(gfp_t gfp, unsigned order) argument
[all...]
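alloc_page_interleave() shows the two-step translation used throughout this file: a policy decision yields a node id, node_zonelist() turns (node, gfp) into a zonelist, and __alloc_pages() does the actual work. A stripped-down sketch; the node choice here is a stand-in for the policy's interleave counter:

#include <linux/gfp.h>
#include <linux/topology.h>

static struct page *demo_alloc_interleaved(gfp_t gfp, unsigned order)
{
	int nid = numa_node_id();	/* stand-in for interleave_nid() */
	struct zonelist *zl = node_zonelist(nid, gfp);

	return __alloc_pages(gfp, order, zl);
}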
shmem.c
1168 static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, argument
1183 page = swapin_readahead(entry, gfp, &pvma, 0);
1187 static struct page *shmem_alloc_page(gfp_t gfp, argument
1201 return alloc_page_vma(gfp, &pvma, 0);
1210 static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, argument
1213 return swapin_readahead(entry, gfp, NULL, 0);
1216 static inline struct page *shmem_alloc_page(gfp_t gfp, argument
1219 return alloc_page(gfp);
1248 gfp_t gfp; local
1270 gfp
3060 shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) argument
[all...]
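Note the two build variants above: with swap support, shmem constructs a pseudo-vma so the allocation obeys the file's NUMA policy; without it, a plain alloc_page(gfp) suffices. The exported entry point is shmem_read_mapping_page_gfp() at line 3060, which drivers can call roughly like this (the mask choice is illustrative):

#include <linux/shmem_fs.h>

static struct page *demo_get_backing_page(struct address_space *mapping,
					  pgoff_t index)
{
	/* GFP_HIGHUSER is a typical choice for pageable backing store */
	return shmem_read_mapping_page_gfp(mapping, index, GFP_HIGHUSER);
}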
slab.c
300 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
874 int batchcount, gfp_t gfp)
879 nc = kmalloc_node(memsize, gfp, node);
926 static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) argument
957 static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) argument
965 ac_ptr = kzalloc_node(memsize, gfp, node);
970 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
2026 * high order pages for slabs. When the gfp() functions are more friendly
2072 * currently bad for the gfp()s.
2086 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) argument
873 alloc_arraycache(int node, int entries, int batchcount, gfp_t gfp) argument
2173 gfp_t gfp; local
3849 alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) argument
3952 do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared, gfp_t gfp) argument
3995 enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) argument
[all...]
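The slab hits are mostly bookkeeping allocations (per-CPU array caches, alien caches) that take a gfp argument because cache setup runs both during early boot and at normal runtime; the gfp local at line 2173 plausibly selects between those phases. A sketch of the recurring node-placed, zeroed-bookkeeping idiom, with the phase selection as an assumption:

#include <linux/slab.h>

/* Sketch of the per-node bookkeeping idiom from alloc_alien_cache()
 * (line 965): zeroed, node-local, with a phase-dependent mask. */
static void *demo_alloc_on_node(size_t size, int node)
{
	/* early boot cannot sleep; once slab is up, GFP_KERNEL is fine */
	gfp_t gfp = slab_is_available() ? GFP_KERNEL : GFP_NOWAIT;

	return kzalloc_node(size, gfp, node);
}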

Completed in 1580 milliseconds