/block/ — search matches for gfp_mask, grouped by file:
blk-map.c — mapping user/kernel buffers onto requests:
    line  42: __blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned int len, gfp_t gfp_mask)
    line 110: blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned long len, gfp_t gfp_mask)
    line 189: blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct sg_iovec *iov, int iov_count, unsigned int len, gfp_t gfp_mask)
    line 289: blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask)
    In each case @gfp_mask carries the memory allocation flags; __blk_rq_map_user() hands it on to either bio_map_user() (line 58) or bio_copy_user() (line 60), and blk_rq_map_user_iov() to bio_map_user_iov() (line 217).
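A minimal usage sketch for blk_rq_map_user(): how an ioctl-style path of this era might map a user buffer into a passthrough request. my_read_from_disk() is a made-up name and the error handling is abbreviated; the block-layer calls match the signatures listed above.

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    static int my_read_from_disk(struct request_queue *q, struct gendisk *disk,
                                 void __user *ubuf, unsigned long len)
    {
            struct request *rq;
            struct bio *bio;
            int ret;

            rq = blk_get_request(q, READ, GFP_KERNEL);      /* may sleep */
            if (!rq)
                    return -ENOMEM;
            rq->cmd_type = REQ_TYPE_BLOCK_PC;               /* passthrough */

            /* Pin the user pages (or bounce-copy them) into rq's bios. */
            ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
            if (ret)
                    goto out;
            bio = rq->bio;                                  /* saved for unmap */

            ret = blk_execute_rq(q, disk, rq, 0);           /* synchronous */

            /* Copy back / unpin, using the bio saved before execution. */
            if (blk_rq_unmap_user(bio))
                    ret = -EFAULT;
    out:
            blk_put_request(rq);
            return ret;
    }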
blk-lib.c — range helpers; @gfp_mask gives the memory allocation flags (for bio_alloc) in each:
    line  40: blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
    line 155: blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page)
    line 229: __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
    line 291: blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
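A short sketch, assuming a caller that prefers discard but can fall back to zeroing; my_discard_range() is illustrative, the two helpers are invoked exactly as listed.

    #include <linux/blkdev.h>

    static int my_discard_range(struct block_device *bdev, sector_t sector,
                                sector_t nr_sects)
    {
            int ret;

            /* GFP_KERNEL: each bio the helper builds may sleep to allocate */
            ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
            if (ret == -EOPNOTSUPP)
                    /* no discard support on this device: write zeroes instead */
                    ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
                                               GFP_KERNEL);
            return ret;
    }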
bio.c — bio allocation, cloning, and user/kernel-space mapping:
    line  175: bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, …)
    line  411: bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
    line  582: bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
    line  616: bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, struct bio_set *bs)
    line  940: bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
    line 1034: bio_alloc_map_data(unsigned int iov_count, gfp_t gfp_mask)
    line 1140: bio_copy_user_iov(struct request_queue *q, struct rq_map_data *map_data, const struct sg_iovec *iov, int iov_count, int write_to_vm, gfp_t gfp_mask)
    line 1266: bio_copy_user(struct request_queue *q, struct rq_map_data *map_data, unsigned long uaddr, unsigned int len, int write_to_vm, gfp_t gfp_mask)
    line 1279: __bio_map_user_iov(struct request_queue *q, struct block_device *bdev, const struct sg_iovec *iov, int iov_count, int write_to_vm, gfp_t gfp_mask)
    line 1403: bio_map_user(struct request_queue *q, struct block_device *bdev, unsigned long uaddr, unsigned int len, int write_to_vm, gfp_t gfp_mask)
    line 1428: bio_map_user_iov(struct request_queue *q, struct block_device *bdev, const struct sg_iovec *iov, int iov_count, int write_to_vm, gfp_t gfp_mask)
    line 1489: __bio_map_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask)
    line 1536: bio_map_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask)
    line 1589: bio_copy_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask, int reading)
    Here @gfp_mask is documented as "the GFP_ mask given to the slab allocator". The fragments also show the fallback strategy: bvec_alloc() tries an allocation with __GFP_WAIT and __GFP_IO masked off and retries only when the caller's mask allows waiting (lines 212-229), and bio_alloc_bioset() saves the caller's mask and strips __GFP_WAIT for its first mempool_alloc(bs->bio_pool, ...) pass (lines 413-458).
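A sketch of the usual reason to pass a bio_set here: a driver creates a private pool so its bio allocations cannot fail permanently under memory pressure. my_bioset, my_bio_init() and my_get_bio() are illustrative names; bioset_create() and bio_alloc_bioset() are the era's API.

    #include <linux/bio.h>
    #include <linux/init.h>

    static struct bio_set *my_bioset;

    static int __init my_bio_init(void)
    {
            /* 64 pre-allocated bios; no per-bio front padding */
            my_bioset = bioset_create(64, 0);
            return my_bioset ? 0 : -ENOMEM;
    }

    static struct bio *my_get_bio(unsigned int nr_pages)
    {
            /*
             * With __GFP_WAIT in the mask (GFP_NOIO has it) the call can
             * fall back to the bioset's mempool and eventually succeed;
             * non-waiting masks may return NULL, which callers must handle.
             */
            return bio_alloc_bioset(GFP_NOIO, nr_pages, my_bioset);
    }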
blk.h — block-layer internal declarations:
    line 244: int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
    line 258: static inline struct io_context *create_io_context(gfp_t gfp_mask, int node) — @gfp_mask is the allocation mask; the inline calls create_task_io_context(current, gfp_mask, node) when current has no io_context yet (line 262).
    Two further prototypes taking a gfp_t gfp_mask appear at lines 56 and 241; their names are truncated in the listing.
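A reconstruction sketch of the create_io_context() inline from the fragments above, as it would sit in blk.h; the unlikely() dressing and the NULL-tolerant return are assumptions, while the create_task_io_context(current, gfp_mask, node) call is verbatim from line 262.

    /* reconstruction sketch, not a verbatim copy of blk.h */
    static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
    {
            if (unlikely(!current->io_context))
                    create_task_io_context(current, gfp_mask, node);

            /* may still be NULL if allocation failed; callers tolerate that */
            return current->io_context;
    }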
bio-integrity.c — integrity payload allocation (@gfp_mask: memory allocation mask):
    line  45: bio_integrity_alloc(struct bio *bio, gfp_t gfp_mask, unsigned int nr_vecs)
    line 450: bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
    bio_integrity_alloc() takes the payload from mempool_alloc(bs->bio_integrity_pool, gfp_mask) (line 59), or a kmalloc sized for the bip plus nr_vecs bio_vecs (line 56), with vectors from bvec_alloc() (line 69); bio_integrity_clone() allocates a payload sized for bip_src->bip_vcnt (line 458).
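A sketch, assuming a submitter attaching protection information to a bio; my_attach_pi() is illustrative, and bio_integrity_add_page() is the listed function's usual companion.

    #include <linux/bio.h>

    static int my_attach_pi(struct bio *bio, struct page *pi_page,
                            unsigned int len, unsigned int offset)
    {
            struct bio_integrity_payload *bip;

            /* GFP_NOIO: this typically runs on the I/O submission path */
            bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
            if (!bip)
                    return -ENOMEM;

            /* returns the byte count added; anything short is failure */
            if (bio_integrity_add_page(bio, pi_page, len, offset) < len)
                    return -EIO;
            return 0;
    }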
blk-core.c — queue and request allocation:
    line  538: blk_init_rl(struct request_list *rl, struct request_queue *q, gfp_t gfp_mask)
    line  565: blk_alloc_queue(gfp_t gfp_mask) — returns blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE)
    line  571: blk_alloc_queue_node(gfp_t gfp_mask, int node_id) — allocates the queue with gfp_mask | __GFP_ZERO on node_id (line 577) and gets q->id from ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask) (line 581)
    line  935: __get_request(struct request_list *rl, int rw_flags, struct bio *bio, gfp_t gfp_mask) — @gfp_mask is the allocation mask; requests come from mempool_alloc(rl->rq_pool, gfp_mask) (line 1018)
    line 1112: get_request(struct request_queue *q, int rw_flags, struct bio *bio, gfp_t gfp_mask)
    line 1153: blk_old_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
    line 1172: blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
    line 1212: blk_make_request(struct request_queue *q, struct bio *bio, gfp_t gfp_mask)
    line 2916: blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int (*bio_ctr)(struct bio *, struct bio *, void *), void *data)
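A small sketch showing what @gfp_mask buys the caller of blk_get_request(): with __GFP_WAIT set (part of GFP_KERNEL in this era) the allocation can sleep in get_request() until a request frees up; without it the call fails fast. my_try_get_request() is illustrative.

    #include <linux/blkdev.h>

    static struct request *my_try_get_request(struct request_queue *q,
                                              bool can_sleep)
    {
            /*
             * GFP_KERNEL may block waiting for a free request;
             * GFP_ATOMIC returns NULL instead of sleeping.
             */
            return blk_get_request(q, READ,
                                   can_sleep ? GFP_KERNEL : GFP_ATOMIC);
    }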
blk-ioc.c:
    line 357: ioc_create_icq(struct io_context *ioc, struct request_queue *q, gfp_t gfp_mask) — creates the io_cq linking @ioc and @q using @gfp_mask; the icq comes from kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO, ...) (line 364) and radix_tree_maybe_preload(gfp_mask) reserves radix-tree nodes before the insert (line 369).
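A sketch of the preload-then-insert idiom visible at line 369; my_radix_insert() is illustrative, the radix-tree calls are real API of this vintage.

    #include <linux/radix-tree.h>

    static int my_radix_insert(struct radix_tree_root *root,
                               unsigned long index, void *item,
                               gfp_t gfp_mask)
    {
            int ret;

            /* Reserve tree nodes up front; sleeps only if gfp_mask allows */
            ret = radix_tree_maybe_preload(gfp_mask);
            if (ret)
                    return ret;

            ret = radix_tree_insert(root, index, item);
            radix_tree_preload_end();   /* drops the preload/preempt state */
            return ret;
    }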
bounce.c:
    line 74: static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) — page allocator for the ISA bounce pool; it or-s GFP_DMA into gfp_mask so mempool_alloc_pages() draws from the low DMA zone that ISA devices can address.
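A sketch of building a DMA-constrained page mempool the way bounce.c does; my_alloc_pages_isa() and my_create_isa_pool() are illustrative wrappers around the stock mempool helpers.

    #include <linux/mempool.h>
    #include <linux/gfp.h>

    static void *my_alloc_pages_isa(gfp_t gfp_mask, void *data)
    {
            /* data carries the page order; force the low (ISA) zone */
            return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
    }

    static mempool_t *my_create_isa_pool(int min_nr)
    {
            /* order-0 pages; the free side can use the stock helper */
            return mempool_create(min_nr, my_alloc_pages_isa,
                                  mempool_free_pages, (void *)0);
    }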
blk-flush.c:
    line 449: int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, …) — @gfp_mask: memory allocation flags (for bio_alloc); the flush is an empty bio from bio_alloc(gfp_mask, 0) (line 472). The final parameter is truncated in the listing.
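A one-call usage sketch; my_sync_cache() is illustrative. The final blkdev_issue_flush() parameter, truncated in the listing, is an optional out-pointer for the failed sector in kernels of this vintage, and NULL is accepted.

    #include <linux/blkdev.h>

    static int my_sync_cache(struct block_device *bdev)
    {
            /* GFP_KERNEL: the internal bio_alloc(gfp_mask, 0) may sleep */
            return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
    }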
blk-cgroup.c:
    line 69: blkg_alloc(struct blkcg *blkcg, struct request_queue *q, gfp_t gfp_mask) — @gfp_mask is the allocation mask to use; the blkg comes from kzalloc_node(sizeof(*blkg), gfp_mask, q->node) (line 76), its request_list is set up via blk_init_rl(&blkg->rl, q, gfp_mask) (line 87), and each policy's data via kzalloc_node(pol->pd_size, gfp_mask, q->node) (line 100).
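A sketch of the NUMA-aware, zeroed allocation pattern blkg_alloc() uses; struct my_pd and my_pd_alloc() are illustrative.

    #include <linux/blkdev.h>
    #include <linux/slab.h>

    struct my_pd {
            u64 stat;
    };

    static struct my_pd *my_pd_alloc(struct request_queue *q, gfp_t gfp_mask)
    {
            /* zeroed, placed on the queue's home node like blkg_alloc() */
            return kzalloc_node(sizeof(struct my_pd), gfp_mask, q->node);
    }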
cfq-iosched.c — CFQ queue setup (prototype at line 862):
    line 3581: cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, struct bio *bio, gfp_t gfp_mask) — attempts a fresh cfqq allocation (kmem_cache, gfp_mask | __GFP_ZERO, lines 3604-3617) only when gfp_mask includes __GFP_WAIT
    line 3656: cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, struct bio *bio, gfp_t gfp_mask) — forwards gfp_mask to cfq_find_alloc_queue() (line 3670)
    line 4205: cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio, gfp_t gfp_mask) — opens with might_sleep_if(gfp_mask & __GFP_WAIT) (line 4214) before calling cfq_get_queue() (line 4223)
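A sketch of the convention cfq_set_request() opens with: a function that may block depending on gfp_mask documents that with might_sleep_if(). my_alloc_queue_state() is illustrative; the calls are real API of this era.

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static void *my_alloc_queue_state(struct kmem_cache *cache,
                                      gfp_t gfp_mask, int node)
    {
            /*
             * Complains (in debug builds) if we are in atomic context
             * while the mask says sleeping is allowed.
             */
            might_sleep_if(gfp_mask & __GFP_WAIT);

            /* zeroed and node-local, like the cfqq allocations above */
            return kmem_cache_alloc_node(cache, gfp_mask | __GFP_ZERO, node);
    }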
elevator.c:
    line 706: elv_set_request(struct request_queue *q, struct request *rq, struct bio *bio, gfp_t gfp_mask) — dispatches to the active scheduler's e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask) (line 712).
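A sketch of the hook elv_set_request() dispatches to: an I/O scheduler fills in elevator_set_req_fn to allocate per-request private state. my_sched and my_set_req_fn() are illustrative; the ops field name matches the dispatch shown at line 712.

    #include <linux/elevator.h>

    static int my_set_req_fn(struct request_queue *q, struct request *rq,
                             struct bio *bio, gfp_t gfp_mask)
    {
            /*
             * Allocate scheduler-private state with gfp_mask and hang
             * it off rq->elv.priv[]; return 0 on success.
             */
            return 0;
    }

    static struct elevator_type my_sched = {
            .ops = {
                    .elevator_set_req_fn = my_set_req_fn,
            },
            .elevator_name = "my-sched",
    };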