Searched refs:gfp_mask (Results 1 - 25 of 35) sorted by relevance


/include/linux/
cpuset.h
31 extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
32 extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
34 static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) argument
37 __cpuset_node_allowed_softwall(node, gfp_mask);
40 static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) argument
43 __cpuset_node_allowed_hardwall(node, gfp_mask);
46 static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) argument
48 return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
51 static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) argument
53 return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
155 cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) argument
160 cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) argument
165 cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) argument
170 cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) argument
[all...]
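The cpuset helpers above gate allocations by node. Below is a minimal, hedged sketch of the pattern the page allocator follows when consulting them; the function name, variables, and the zone scan itself are illustrative, not taken from these results.

#include <linux/cpuset.h>
#include <linux/mmzone.h>

/* Hypothetical helper: return the first zone in the zonelist whose node
 * the current task's cpuset permits for this gfp_mask, or NULL. */
static struct zone *first_allowed_zone(struct zonelist *zonelist,
				       enum zone_type high_zoneidx,
				       gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			continue;	/* node forbidden by the cpuset */
		return zone;
	}
	return NULL;
}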
mempool.h
11 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
31 extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
33 extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
40 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
53 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
65 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
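For context, a minimal sketch of how the mempool prototypes above are usually combined; the cache name, element size, and reserve count are hypothetical.

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *obj_cache;	/* hypothetical backing slab cache */
static mempool_t *obj_pool;

static int obj_pool_init(void)
{
	obj_cache = kmem_cache_create("obj_cache", 128, 0, 0, NULL);
	if (!obj_cache)
		return -ENOMEM;

	/* mempool_alloc_slab/mempool_free_slab forward gfp_mask to the slab layer */
	obj_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
				  obj_cache);
	if (!obj_pool) {
		kmem_cache_destroy(obj_cache);
		return -ENOMEM;
	}
	return 0;
}

static void *obj_get(void)
{
	/* GFP_KERNEL may sleep; the pool's reserved elements cover failure */
	return mempool_alloc(obj_pool, GFP_KERNEL);
}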
gfp.h
274 * We get the zone list from the current node and the gfp_mask.
295 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
299 __alloc_pages(gfp_t gfp_mask, unsigned int order, argument
302 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
305 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, argument
312 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
315 static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, argument
320 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
327 alloc_pages(gfp_t gfp_mask, unsigned int order) argument
[all...]
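A short illustration of the allocator entry points listed above; the order and flag choices are arbitrary examples.

#include <linux/gfp.h>
#include <linux/topology.h>

static void gfp_mask_demo(void)
{
	/* 2^2 = 4 contiguous pages from the local node's zonelist */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (page)
		__free_pages(page, 2);

	/* explicit node: node_zonelist(nid, gfp_mask) selects the zonelist,
	 * as the gfp.h comment at line 274 describes */
	page = alloc_pages_node(numa_node_id(), GFP_KERNEL | __GFP_ZERO, 0);
	if (page)
		__free_pages(page, 0);
}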
prio_heap.h
29 * @gfp_mask: mask to pass to kmalloc()
32 extern int heap_init(struct ptr_heap *heap, size_t size, gfp_t gfp_mask,
shrinker.h
9 gfp_t gfp_mask; member in struct:shrink_control
memcontrol.h
43 * All "charge" functions with gfp_mask should use GFP_KERNEL or
44 * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't
46 * memory from" bits of gfp_mask has no meaning. So any bits of that field is
47 * available but adding a rule is better. charge functions' gfp_mask should
48 * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous
54 gfp_t gfp_mask);
63 gfp_t gfp_mask);
80 extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
105 struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
189 gfp_t gfp_mask,
205 mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) argument
211 mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) argument
217 mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp) argument
306 mem_cgroup_prepare_migration(struct page *page, struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask) argument
393 mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, gfp_t gfp_mask, unsigned long *total_scanned) argument
[all...]
idr.h
105 int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
142 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
150 gfp_t gfp_mask);
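The idr/ida prototypes above belong to the preload-style interface of this kernel generation; a hedged sketch of the usual retry loop follows. idr_get_new() is assumed from the same API generation, and the idr, lock, and function names are hypothetical.

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(obj_idr);
static DEFINE_SPINLOCK(obj_idr_lock);

static int obj_assign_id(void *ptr)
{
	int id, ret;

retry:
	/* gfp_mask is honoured here, outside the lock */
	if (!idr_pre_get(&obj_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&obj_idr_lock);
	ret = idr_get_new(&obj_idr, ptr, &id);
	spin_unlock(&obj_idr_lock);

	if (ret == -EAGAIN)
		goto retry;
	return ret ? ret : id;
}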
kmod.h
71 char **envp, gfp_t gfp_mask);
92 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; local
94 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
oom.h
51 extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
compaction.h
24 int order, gfp_t gfp_mask, nodemask_t *mask,
66 int order, gfp_t gfp_mask, nodemask_t *nodemask,
65 try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, bool sync) argument
textsearch.h
162 gfp_t gfp_mask)
166 conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
161 alloc_ts_config(size_t payload, gfp_t gfp_mask) argument
pagemap.h
235 pgoff_t index, gfp_t gfp_mask);
262 pgoff_t index, gfp_t gfp_mask);
456 pgoff_t index, gfp_t gfp_mask);
458 pgoff_t index, gfp_t gfp_mask);
461 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
468 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
473 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
467 add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) argument
swap.h
253 gfp_t gfp_mask, nodemask_t *mask);
256 gfp_t gfp_mask, bool noswap);
258 gfp_t gfp_mask, bool noswap,
408 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) argument
430 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, argument
452 gfp_t gfp_mask)
451 add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) argument
radix-tree.h
63 /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
66 gfp_t gfp_mask; member in struct:radix_tree_root
72 .gfp_mask = (mask), \
82 (root)->gfp_mask = (mask); \
233 int radix_tree_preload(gfp_t gfp_mask);
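As the comment at line 63 notes, the root's gfp_mask doubles as tag storage above __GFP_BITS_SHIFT; radix_tree_preload() is the other place a gfp_mask appears. A small sketch of the usual preload pattern; the tree and lock names are hypothetical, and radix_tree_insert() is assumed from the same header.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(obj_tree, GFP_ATOMIC);	/* root gfp_mask; tags live in its high bits */
static DEFINE_SPINLOCK(obj_tree_lock);

static int obj_tree_store(unsigned long index, void *item)
{
	int err;

	/* preallocate nodes with a sleeping mask before taking the lock */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	spin_lock(&obj_tree_lock);
	err = radix_tree_insert(&obj_tree, index, item);
	spin_unlock(&obj_tree_lock);

	radix_tree_preload_end();
	return err;
}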
shmem_fs.h
53 pgoff_t index, gfp_t gfp_mask);
mISDNif.h
525 mI_alloc_skb(unsigned int len, gfp_t gfp_mask) argument
529 skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask);
536 _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) argument
538 struct sk_buff *skb = mI_alloc_skb(len, gfp_mask);
553 u_int id, u_int len, void *dp, gfp_t gfp_mask)
559 skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask);
552 _queue_data(struct mISDNchannel *ch, u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) argument
vmalloc.h
62 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
64 unsigned long start, unsigned long end, gfp_t gfp_mask,
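The __vmalloc() prototype above takes an explicit gfp_mask and page protection; a minimal sketch, with the buffer name, size handling, and flag choice as illustrative assumptions.

#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *big_buf;

static int big_buf_alloc(unsigned long size)
{
	/* same allocation vmalloc() would make, but with the mask spelled out */
	big_buf = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
	return big_buf ? 0 : -ENOMEM;
}

static void big_buf_free(void)
{
	vfree(big_buf);
	big_buf = NULL;
}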
btree.h
45 * @gfp_mask: gfp mask for the allocation
48 void *btree_alloc(gfp_t gfp_mask, void *pool_data);
blkdev.h
679 struct bio_set *bs, gfp_t gfp_mask,
955 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
957 sector_t nr_sects, gfp_t gfp_mask);
959 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
963 gfp_mask, flags);
966 sector_t nr_blocks, gfp_t gfp_mask)
971 gfp_mask);
958 sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) argument
965 sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) argument
suspend.h
314 extern unsigned long get_safe_page(gfp_t gfp_mask);
writeback.h
123 void throttle_vm_writeout(gfp_t gfp_mask);
kfifo.h
323 * @gfp_mask: get_free_pages mask, passed to kmalloc()
331 #define kfifo_alloc(fifo, size, gfp_mask) \
337 __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
791 size_t esize, gfp_t gfp_mask);
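kfifo_alloc() above forwards its gfp_mask to kmalloc(); a hedged sketch of the declare/alloc/free cycle, with the fifo name and size as arbitrary examples.

#include <linux/kfifo.h>

static DECLARE_KFIFO_PTR(byte_fifo, unsigned char);

static int byte_fifo_init(void)
{
	/* size is rounded up to a power of two; gfp_mask goes to kmalloc() */
	return kfifo_alloc(&byte_fifo, 4096, GFP_KERNEL);
}

static void byte_fifo_exit(void)
{
	kfifo_free(&byte_fifo);
}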
/include/linux/sunrpc/
gss_api.h
39 gfp_t gfp_mask);
94 gfp_t gfp_mask);
/include/rdma/
ib_sa.h
278 int timeout_ms, gfp_t gfp_mask,
290 int timeout_ms, gfp_t gfp_mask,
315 * @gfp_mask: GFP mask for memory allocations.
336 ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
/include/scsi/
scsi_cmnd.h
160 extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask);
166 struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask);
167 void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd);

Completed in 445 milliseconds
