Searched refs:gfp_mask (Results 1 - 25 of 57) sorted by relevance

/drivers/infiniband/hw/amso1100/
c2_alloc.c 39 static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, argument
47 &dma_addr, gfp_mask);
71 int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, argument
74 return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
90 dma_addr_t *dma_addr, gfp_t gfp_mask)
100 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
89 c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, dma_addr_t *dma_addr, gfp_t gfp_mask) argument
c2.h 542 extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
546 dma_addr_t *dma_addr, gfp_t gfp_mask);
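
The amso1100 hits above show the most common kernel idiom for gfp_mask: the caller's gfp_t is threaded unchanged through every allocation helper, so the original calling context (may sleep vs. atomic) governs each allocation down the chain. A minimal sketch of that idiom, with hypothetical names rather than the amso1100 code:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct chunk {
            struct chunk *next;
    };

    /* Forward the caller's gfp_mask to every allocation in the chain. */
    static int pool_grow(struct chunk **head, gfp_t gfp_mask)
    {
            struct chunk *c = kmalloc(sizeof(*c), gfp_mask);

            if (!c)
                    return -ENOMEM;
            c->next = *head;
            *head = c;
            return 0;
    }
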
/drivers/infiniband/core/
sa.h 56 int timeout_ms, gfp_t gfp_mask,
sa_query.c 529 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) argument
545 gfp_mask);
578 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) argument
584 if (!idr_pre_get(&query_idr, gfp_mask))
649 * @gfp_mask: GFP mask to use for internal allocations
670 int timeout_ms, gfp_t gfp_mask,
690 query = kmalloc(sizeof *query, gfp_mask);
695 ret = alloc_mad(&query->sa_query, gfp_mask);
717 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
765 * @gfp_mask
666 ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, u8 port_num, struct ib_sa_path_rec *rec, ib_sa_comp_mask comp_mask, int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_path_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) argument
783 ib_sa_service_rec_query(struct ib_sa_client *client, struct ib_device *device, u8 port_num, u8 method, struct ib_sa_service_rec *rec, ib_sa_comp_mask comp_mask, int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_service_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) argument
879 ib_sa_mcmember_rec_query(struct ib_sa_client *client, struct ib_device *device, u8 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_mcmember_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) argument
[all...]
multicast.c 558 union ib_gid *mgid, gfp_t gfp_mask)
573 group = kzalloc(sizeof *group, gfp_mask);
610 ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
624 member = kmalloc(sizeof *member, gfp_mask);
639 &rec->mgid, gfp_mask);
557 acquire_group(struct mcast_port *port, union ib_gid *mgid, gfp_t gfp_mask) argument
607 ib_sa_join_multicast(struct ib_sa_client *client, struct ib_device *device, u8 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, gfp_t gfp_mask, int (*callback)(int status, struct ib_sa_multicast *multicast), void *context) argument
/drivers/net/wireless/ath/
main.c 28 gfp_t gfp_mask)
46 skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
26 ath_rxbuf_alloc(struct ath_common *common, u32 len, gfp_t gfp_mask) argument
/drivers/gpu/ion/
ion_page_pool.c 31 struct page *page = alloc_pages(pool->gfp_mask, pool->order);
130 if (sc->gfp_mask & __GFP_HIGHMEM)
162 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) argument
176 pool->gfp_mask = gfp_mask;
ion_priv.h 230 * @gfp_mask: gfp_mask to use from alloc
247 gfp_t gfp_mask; member in struct:ion_page_pool
251 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
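
ion_page_pool takes the opposite approach: ion_page_pool_create() records the gfp_mask in the pool once, and every later allocation replays it via alloc_pages(pool->gfp_mask, pool->order); the shrinker then checks sc->gfp_mask for __GFP_HIGHMEM to decide which pools it may drain. A hedged sketch of the store-and-replay half (hypothetical names, not the ion code):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct page_pool {
            gfp_t gfp_mask;           /* fixed at create time */
            unsigned int order;
    };

    static struct page_pool *pool_create(gfp_t gfp_mask, unsigned int order)
    {
            struct page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);

            if (!pool)
                    return NULL;
            pool->gfp_mask = gfp_mask;
            pool->order = order;
            return pool;
    }

    static struct page *pool_alloc(struct page_pool *pool)
    {
            /* Replay the mask that was chosen at create time. */
            return alloc_pages(pool->gfp_mask, pool->order);
    }
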
/drivers/xen/
xencomm.c 70 static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask, argument
88 desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
97 desc = kmalloc(size, gfp_mask);
121 struct xencomm_desc **ret, gfp_t gfp_mask)
137 desc = xencomm_alloc(gfp_mask, buffer, bytes);
120 xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask) argument
/drivers/scsi/
scsi.c 144 gfp_t gfp_mask; member in struct:scsi_host_cmd_pool
157 .gfp_mask = __GFP_DMA,
165 * @gfp_mask: mask for the allocation
171 scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask) argument
175 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
180 gfp_mask | pool->gfp_mask);
211 * @gfp_mask: mask for the allocation
217 scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask) argument
245 __scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) argument
284 scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) argument
359 scsi_get_host_cmd_pool(gfp_t gfp_mask) argument
392 scsi_put_host_cmd_pool(gfp_t gfp_mask) argument
426 scsi_allocate_command(gfp_t gfp_mask) argument
446 scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd) argument
482 const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL; local
[all...]
hosts.c 339 gfp_t gfp_mask = GFP_KERNEL; local
342 gfp_mask |= __GFP_DMA;
344 shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
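
hosts.c (lines 339-344) builds the mask up instead of passing it through: start from GFP_KERNEL and OR in restriction flags the hardware requires. Reassembled as one continuous fragment (the condition name is hypothetical; the real code tests the host template's unchecked_isa_dma flag):

    gfp_t gfp_mask = GFP_KERNEL;

    if (needs_isa_dma)                 /* hypothetical condition */
            gfp_mask |= __GFP_DMA;     /* restrict to the ISA DMA zone */

    shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
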
scsi_lib.c 604 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) argument
609 return mempool_alloc(sgp->pool, gfp_mask);
613 gfp_t gfp_mask)
620 gfp_mask, scsi_sg_alloc);
971 gfp_t gfp_mask)
979 gfp_mask))) {
1007 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) argument
1011 int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
1036 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
612 scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, gfp_t gfp_mask) argument
970 scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, gfp_t gfp_mask) argument
scsi_tgt_lib.c 71 * gfp_mask - allocator flags
79 gfp_t gfp_mask)
99 rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask);
103 cmd = __scsi_get_command(shost, gfp_mask);
77 scsi_host_get_command(struct Scsi_Host *shost, enum dma_data_direction data_dir, gfp_t gfp_mask) argument
/drivers/net/ethernet/mellanox/mlx4/
icm.c 96 static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) argument
100 page = alloc_pages(gfp_mask, order);
109 int order, gfp_t gfp_mask)
112 &sg_dma_address(mem), gfp_mask);
123 gfp_t gfp_mask, int coherent)
131 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
133 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
145 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
161 cur_order, gfp_mask);
164 cur_order, gfp_mask);
108 mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, int order, gfp_t gfp_mask) argument
122 mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask, int coherent) argument
[all...]
icm.h 71 gfp_t gfp_mask, int coherent);
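
mlx4_alloc_icm() (and mthca_alloc_icm() further down) shows the subtractive pattern: flags are stripped from the caller's mask where they would be wrong for one particular allocation. Metadata obtained with kmalloc() must not come from highmem, so __GFP_HIGHMEM (plus __GFP_NOWARN) is cleared for that call only, while the data pages still honour the full mask. A minimal sketch with a simplified stand-in struct, not the mlx4 code:

    #include <linux/slab.h>
    #include <linux/gfp.h>

    struct icm {
            struct page *page;
    };

    static struct icm *icm_alloc(gfp_t gfp_mask, int order)
    {
            struct icm *icm;

            /* kmalloc() memory needs a kernel mapping, so __GFP_HIGHMEM
             * (and __GFP_NOWARN) are cleared for the metadata only. */
            icm = kmalloc(sizeof(*icm),
                          gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
            if (!icm)
                    return NULL;

            /* The data pages themselves may honour the full caller mask. */
            icm->page = alloc_pages(gfp_mask, order);
            if (!icm->page) {
                    kfree(icm);
                    return NULL;
            }
            return icm;
    }
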
/drivers/connector/
connector.c 66 int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) argument
100 skb = alloc_skb(size, gfp_mask);
112 return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
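
cn_netlink_send() is a consumer-facing API: only the caller knows its context, so the caller chooses the mask and it is handed straight on to alloc_skb() and netlink_broadcast(). Typical call sites would look like this (msg is a hypothetical, already-built cn_msg):

    /* Process context: the allocation is allowed to sleep. */
    cn_netlink_send(msg, 0, GFP_KERNEL);

    /* Softirq or spinlock held: must not sleep. */
    cn_netlink_send(msg, 0, GFP_ATOMIC);
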
/drivers/staging/android/
lowmemorykiller.c 94 sc->nr_to_scan, sc->gfp_mask, other_free,
102 sc->nr_to_scan, sc->gfp_mask, rem);
157 sc->nr_to_scan, sc->gfp_mask, rem);
/drivers/infiniband/hw/mthca/
mthca_memfree.c 107 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) argument
115 page = alloc_pages(gfp_mask | __GFP_ZERO, order);
124 int order, gfp_t gfp_mask)
127 gfp_mask);
138 gfp_t gfp_mask, int coherent)
146 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
148 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
160 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
176 cur_order, gfp_mask);
179 cur_order, gfp_mask);
123 mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, int order, gfp_t gfp_mask) argument
137 mthca_alloc_icm(struct mthca_dev *dev, int npages, gfp_t gfp_mask, int coherent) argument
[all...]
mthca_memfree.h 83 gfp_t gfp_mask, int coherent);
/drivers/net/ethernet/ti/
davinci_cpdma.h 85 int len, gfp_t gfp_mask);
/drivers/usb/wusbcore/
wa-hc.h 218 static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask) argument
223 return usb_submit_urb(urb, gfp_mask);
/drivers/md/
dm-bufio.c 321 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, argument
326 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
330 gfp_mask & __GFP_NORETRY) {
332 return (void *)__get_free_pages(gfp_mask,
337 return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
369 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) argument
372 gfp_mask);
379 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1392 if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
1406 if (sc->gfp_mask
[all...]
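
dm-bufio's alloc_buffer_data() goes a step further and uses the mask itself to pick an allocation strategy: slab for small buffers, __get_free_pages() when the caller set __GFP_NORETRY (the cheap, failure-tolerant path a shrinker wants), and __vmalloc() as the last resort. A simplified, hedged reconstruction; struct client and its field are stand-ins for the real dm_bufio_client, and the size thresholds are simplified:

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    struct client {                  /* stand-in for dm_bufio_client */
            unsigned int block_size;
    };

    static void *alloc_data(struct client *c, gfp_t gfp_mask)
    {
            if (c->block_size <= PAGE_SIZE)
                    return kmalloc(c->block_size, gfp_mask);

            if (gfp_mask & __GFP_NORETRY)
                    /* Fails fast instead of retrying: safe under reclaim. */
                    return (void *)__get_free_pages(gfp_mask,
                                            get_order(c->block_size));

            /* Large buffers fall back to vmalloc space. */
            return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
    }
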
md.h 621 extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
623 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
/drivers/net/wireless/ipw2x00/
libipw_tx.c 192 int headroom, gfp_t gfp_mask)
197 gfp_mask);
207 gfp_mask);
191 libipw_alloc_txb(int nr_frags, int txb_size, int headroom, gfp_t gfp_mask) argument
/drivers/staging/rtl8187se/ieee80211/
ieee80211_tx.c 243 int gfp_mask)
249 gfp_mask);
242 ieee80211_alloc_txb(int nr_frags, int txb_size, int gfp_mask) argument

Completed in 975 milliseconds
