Searched defs:gfp_mask (Results 1 - 14 of 14) sorted by relevance

/include/linux/
compaction.h
24 int order, gfp_t gfp_mask, nodemask_t *mask,
28 gfp_t gfp_mask, bool sync);
61 int order, gfp_t gfp_mask, nodemask_t *nodemask,
73 gfp_t gfp_mask, bool sync)
60 try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, bool sync) argument
72 compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, bool sync) argument
kmod.h
72 char **envp, gfp_t gfp_mask);
94 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; local
96 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
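
The inline helper above picks GFP_ATOMIC for UMH_NO_WAIT callers (which may not sleep) and GFP_KERNEL otherwise, then hands the mask to call_usermodehelper_setup(). A minimal usage sketch through the public wrapper; the helper path, argv and envp strings are hypothetical.

#include <linux/kmod.h>

/* Hypothetical example: spawn /sbin/foo and wait for it to exit.
 * Internally the wrapper above derives the setup allocation mask:
 * GFP_ATOMIC for UMH_NO_WAIT callers, GFP_KERNEL for waiting ones. */
static int run_foo_helper(void)
{
        char *argv[] = { "/sbin/foo", NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

        return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
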
radix-tree.h
56 /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
59 gfp_t gfp_mask; member in struct:radix_tree_root
65 .gfp_mask = (mask), \
75 (root)->gfp_mask = (mask); \
204 int radix_tree_preload(gfp_t gfp_mask);
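
The comment above notes that the root's gfp_mask doubles as storage for the root tags above __GFP_BITS_SHIFT; the mask itself governs node allocation on insert. A minimal sketch of the usual preload/insert pattern, assuming a hypothetical tree named my_tree and caller-provided index/item.

#include <linux/radix-tree.h>

static RADIX_TREE(my_tree, GFP_KERNEL);        /* gfp_mask recorded in the root */

/* Sketch: preallocate nodes with a sleeping allocation so that the
 * insertion itself (normally done under a spinlock) cannot fail
 * for lack of memory. */
static int my_tree_add(unsigned long index, void *item)
{
        int err = radix_tree_preload(GFP_KERNEL);       /* may sleep here... */

        if (err)
                return err;
        /* ...but not here, where a lock would normally be held. */
        err = radix_tree_insert(&my_tree, index, item);
        radix_tree_preload_end();
        return err;
}
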
cpuset.h
31 extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
32 extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
34 static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) argument
37 __cpuset_node_allowed_softwall(node, gfp_mask);
40 static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) argument
43 __cpuset_node_allowed_hardwall(node, gfp_mask);
46 static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) argument
48 return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
51 static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) argument
53 return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
166 cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) argument
171 cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) argument
176 cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) argument
181 cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) argument
[all...]
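
These softwall/hardwall checks are consulted while walking a zonelist, so allocations stay within the calling task's cpuset. A simplified sketch of that use, assuming the zonelist-iteration macros from mmzone.h; the real page allocator adds further conditions.

#include <linux/cpuset.h>
#include <linux/mmzone.h>

/* Simplified sketch: return the first zone in the zonelist that the
 * current task's cpuset permits for this gfp_mask, or NULL. */
static struct zone *first_allowed_zone(struct zonelist *zonelist,
                                       enum zone_type high_idx, gfp_t gfp_mask)
{
        struct zoneref *z;
        struct zone *zone;

        for_each_zone_zonelist(zone, z, zonelist, high_idx) {
                if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
                        continue;       /* outside current->mems_allowed */
                return zone;
        }
        return NULL;
}
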
textsearch.h
161 gfp_t gfp_mask)
165 conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
160 alloc_ts_config(size_t payload, gfp_t gfp_mask) argument
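
alloc_ts_config() above kzallocs the ts_config together with an aligned private payload in one gfp_mask-governed allocation; callers reach it through textsearch_prepare(). A hedged usage sketch; the algorithm name, pattern and buffer are illustrative only.

#include <linux/textsearch.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* Sketch: search a linear buffer for "needle" using the "kmp" algorithm.
 * The GFP_KERNEL passed here flows down into alloc_ts_config(). */
static int find_needle(const void *buf, unsigned int len)
{
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
        if (IS_ERR(conf))
                return PTR_ERR(conf);

        pos = textsearch_find_continuous(conf, &state, buf, len);
        textsearch_destroy(conf);
        return pos == UINT_MAX ? -1 : (int)pos; /* UINT_MAX: no match */
}
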
memcontrol.h
44 * All "charge" functions with gfp_mask should use GFP_KERNEL or
45 * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't
47 * memory from" bits of gfp_mask has no meaning. So any bits of that field is
48 * available but adding a rule is better. charge functions' gfp_mask should
49 * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous
55 gfp_t gfp_mask);
64 gfp_t gfp_mask);
80 struct mm_struct *mm, gfp_t gfp_mask);
82 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
103 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
169 mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) argument
175 mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) argument
181 mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr) argument
212 mem_cgroup_shmem_charge_fallback(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) argument
274 mem_cgroup_prepare_migration(struct page *page, struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask) argument
353 mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, gfp_t gfp_mask, unsigned long *total_scanned) argument
[all...]
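
Per the comment above, charge functions should be given either GFP_KERNEL or the caller's mask filtered through GFP_RECLAIM_MASK, since memcg only honours the reclaim-related bits. A sketch of that convention as in-tree mm code applies it; note GFP_RECLAIM_MASK comes from mm/internal.h, so this is not something a module could do, and the wrapper function here is hypothetical.

#include <linux/mm.h>
#include <linux/memcontrol.h>
#include "internal.h"           /* mm/internal.h, for GFP_RECLAIM_MASK */

/* Sketch of the convention described above: strip the "where to
 * allocate from" bits and pass only the reclaim-related ones. */
static int charge_cache_page(struct page *page, struct mm_struct *mm,
                             gfp_t gfp_mask)
{
        return mem_cgroup_cache_charge(page, mm, gfp_mask & GFP_RECLAIM_MASK);
}
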
pagemap.h
235 pgoff_t index, gfp_t gfp_mask);
264 pgoff_t index, gfp_t gfp_mask);
458 pgoff_t index, gfp_t gfp_mask);
460 pgoff_t index, gfp_t gfp_mask);
463 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
470 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
475 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
469 add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) argument
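
add_to_page_cache() above wires a locked new page into the mapping's radix tree under the supplied gfp_mask. The usual pattern allocates the page with the mapping's own mask and then inserts it at the target index, readahead-style; a sketch with a hypothetical helper name.

#include <linux/pagemap.h>

/* Sketch: allocate a page-cache page with the mapping's preferred
 * gfp_mask and insert it (adding it to the LRU) at the given index. */
static struct page *grab_new_cache_page(struct address_space *mapping,
                                        pgoff_t index)
{
        struct page *page = page_cache_alloc_cold(mapping);

        if (!page)
                return NULL;
        if (add_to_page_cache_lru(page, mapping, index,
                                  mapping_gfp_mask(mapping))) {
                page_cache_release(page);
                return NULL;
        }
        return page;
}
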
gfp.h
272 * We get the zone list from the current node and the gfp_mask.
293 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
297 __alloc_pages(gfp_t gfp_mask, unsigned int order, argument
300 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
303 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, argument
310 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
313 static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, argument
318 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
325 alloc_pages(gfp_t gfp_mask, unsigned int order) argument
[all...]
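
As the comment above says, the allocator derives a zonelist from the node and the gfp_mask and passes it to __alloc_pages_nodemask(); alloc_pages() is the common entry point. A minimal usage sketch with hypothetical helper names.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: allocate 2^order zeroed, contiguous pages and return their
 * kernel virtual address. GFP_KERNEL may sleep; atomic contexts would
 * use GFP_ATOMIC instead. */
static void *grab_buffer(unsigned int order)
{
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

        return page ? page_address(page) : NULL;
}

static void drop_buffer(void *addr, unsigned int order)
{
        if (addr)
                __free_pages(virt_to_page(addr), order);
}
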
swap.h
253 gfp_t gfp_mask, nodemask_t *mask);
255 gfp_t gfp_mask, bool noswap,
258 gfp_t gfp_mask, bool noswap,
404 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) argument
426 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, argument
448 gfp_t gfp_mask)
447 add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) argument
mISDNif.h
525 mI_alloc_skb(unsigned int len, gfp_t gfp_mask) argument
529 skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask);
536 _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) argument
538 struct sk_buff *skb = mI_alloc_skb(len, gfp_mask);
553 u_int id, u_int len, void *dp, gfp_t gfp_mask)
559 skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask);
552 _queue_data(struct mISDNchannel *ch, u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) argument
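
mI_alloc_skb() above over-allocates by MISDN_HEADER_LEN and then (in the body not shown in this hit list) reserves that space as headroom before the payload is written. A hedged sketch of the same allocate-then-reserve idiom in plain skbuff terms; the helper name is hypothetical.

#include <linux/skbuff.h>

/* Sketch: over-allocate by hdr_len and reserve it as headroom so a
 * lower layer can later push its header without reallocating. */
static struct sk_buff *alloc_with_headroom(unsigned int len,
                                           unsigned int hdr_len,
                                           gfp_t gfp_mask)
{
        struct sk_buff *skb = alloc_skb(len + hdr_len, gfp_mask);

        if (skb)
                skb_reserve(skb, hdr_len);
        return skb;
}
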
mm.h
999 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1139 gfp_t gfp_mask; member in struct:shrink_control
1371 extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
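
struct shrink_control (the gfp_mask member shown above) is what the VM hands to registered shrinkers, so a cache can refuse work when the triggering allocation cannot sleep. A hedged sketch against the shrink_control-era shrinker API declared in this mm.h; the cache counter and helper names are hypothetical.

#include <linux/mm.h>

static int my_cache_objects;    /* hypothetical count of cached objects */

/* Sketch: a shrinker that bails out when the allocation that triggered
 * reclaim cannot wait, and otherwise reports/drops cached objects. */
static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
{
        if (sc->nr_to_scan) {
                if (!(sc->gfp_mask & __GFP_WAIT))
                        return -1;      /* cannot make progress atomically */
                /* ... drop up to sc->nr_to_scan objects here ... */
        }
        return my_cache_objects;
}

static struct shrinker my_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
        /* registered elsewhere with register_shrinker(&my_shrinker) */
};
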
skbuff.h
515 gfp_t gfp_mask);
518 gfp_t gfp_mask);
1533 * @gfp_mask: get_free_pages mask, passed to alloc_skb
1543 gfp_t gfp_mask)
1545 struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
1554 unsigned int length, gfp_t gfp_mask);
1588 * @gfp_mask: alloc_pages_node mask
1594 static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) argument
1596 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
1542 __dev_alloc_skb(unsigned int length, gfp_t gfp_mask) argument
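
__dev_alloc_skb() above pads the allocation with NET_SKB_PAD of headroom; drivers receiving in interrupt context rely on the GFP_ATOMIC-based wrappers (dev_alloc_skb()/netdev_alloc_skb()). A hedged RX-path sketch; the device, frame data and length are hypothetical.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Sketch of a driver receive path: allocate in atomic context, copy the
 * frame in, and hand it to the network stack. */
static int rx_one_frame(struct net_device *dev, const void *data,
                        unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len);       /* GFP_ATOMIC inside */

        if (!skb)
                return -ENOMEM;
        memcpy(skb_put(skb, len), data, len);
        skb->protocol = eth_type_trans(skb, dev);
        return netif_rx(skb);
}
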
blkdev.h
665 struct bio_set *bs, gfp_t gfp_mask,
924 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
926 sector_t nr_sects, gfp_t gfp_mask);
928 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
932 gfp_mask, flags);
935 sector_t nr_blocks, gfp_t gfp_mask)
940 gfp_mask);
927 sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) argument
934 sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) argument
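
sb_issue_discard()/sb_issue_zeroout() above only convert filesystem blocks into 512-byte sectors (block << (s_blocksize_bits - 9)) before calling the blkdev_issue_* helpers with the caller's gfp_mask. A hedged sketch of a filesystem discarding a freed extent; GFP_NOFS is chosen because this would run in filesystem context, and the extent coordinates are hypothetical.

#include <linux/fs.h>
#include <linux/blkdev.h>

/* Sketch: discard a just-freed extent; sb_issue_discard() does the
 * block-to-sector conversion and forwards gfp_mask down the stack. */
static int discard_freed_extent(struct super_block *sb,
                                sector_t start_block, sector_t nr_blocks)
{
        return sb_issue_discard(sb, start_block, nr_blocks, GFP_NOFS, 0);
}
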
/include/net/
sch_generic.h
654 static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask, argument
659 n = skb_clone(skb, gfp_mask);
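
skb_act_clone() above is a thin wrapper around skb_clone() that forwards the caller's gfp_mask; packet-scheduler actions run in softirq context, so GFP_ATOMIC is what they pass. A minimal sketch of that constraint; the helper name is hypothetical.

#include <linux/skbuff.h>

/* Sketch: duplicating an skb on the forwarding fast path must not
 * sleep, so the clone is taken with GFP_ATOMIC. */
static struct sk_buff *mirror_skb(struct sk_buff *skb)
{
        return skb_clone(skb, GFP_ATOMIC);
}
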

Completed in 263 milliseconds