/drivers/crypto/caam/sg_sw_sec4.h
    33: sg_to_sec4_sg(struct scatterlist *sg, int sg_count, ...
    37:         dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
    38:                            sg_dma_len(sg), offset);
    40:         sg = scatterwalk_sg_next(sg);
    50: static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, ...
    54:         sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
    62:         struct scatterlist *sg = sg_list;
    67:         nbytes -= sg->length;
    68:         if (!sg_is_last(sg) ...
    88: dma_map_sg_chained(struct device *dev, struct scatterlist *sg, unsigned int nents, enum dma_data_direction dir, bool chained)
   104: dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, unsigned int nents, enum dma_data_direction dir, bool chained)
    ...
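A minimal sketch of the pattern above: after dma_map_sg(), walk the scatterlist and copy each entry's bus address and length into a hardware-defined S/G table. "struct hw_sg_entry" and "HW_SG_FINAL" are hypothetical stand-ins for the engine's descriptor format (sec4_sg_entry in the CAAM driver), not kernel APIs.

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

struct hw_sg_entry {
	__le64 addr;
	__le32 len;
	__le32 flags;
};

#define HW_SG_FINAL 0x1	/* hypothetical "last entry" flag */

static void sg_to_hw_table(struct scatterlist *sg, int sg_count,
			   struct hw_sg_entry *tbl)
{
	while (sg_count--) {
		tbl->addr = cpu_to_le64(sg_dma_address(sg));
		tbl->len = cpu_to_le32(sg_dma_len(sg));
		/* mark the final entry so the engine knows where to stop */
		tbl->flags = cpu_to_le32(sg_count ? 0 : HW_SG_FINAL);
		tbl++;
		sg = sg_next(sg);
	}
}
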
/drivers/crypto/qce/dma.c
    57: int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, ...
    63:         while (sg) {
    64:                 err = dma_map_sg(dev, sg, 1, dir);
    67:                 sg = scatterwalk_sg_next(sg);
    70:         err = dma_map_sg(dev, sg, nents, dir);
    78: void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, ...
    82:         while (sg) {
    83:                 dma_unmap_sg(dev, sg, 1, dir);
    84:                 sg = scatterwalk_sg_next(sg);
    92:         struct scatterlist *sg = sglist;
   112:         struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
   134: qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg, int nents, unsigned long flags, enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_param)
    ...
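A sketch of the chained-vs-flat split seen in qce_mapsg(): a chained scatterlist cannot be handed to dma_map_sg() as one flat array, so each link is mapped individually. This assumes a kernel of the driver's era, where dma_map_sg() did not follow chain links itself; unmapping of already-mapped links on failure is elided.

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_sg_maybe_chained(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				bool chained)
{
	int err;

	if (!chained)
		return dma_map_sg(dev, sg, nents, dir);

	while (sg) {
		err = dma_map_sg(dev, sg, 1, dir);	/* one link at a time */
		if (!err)
			return -EFAULT;
		sg = sg_next(sg);
	}
	return nents;
}
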
/drivers/media/pci/tw68/tw68-risc.c
    46:         struct scatterlist *sg;
    62:         sg = sglist;
    65:         while (offset && offset >= sg_dma_len(sg)) {
    66:                 offset -= sg_dma_len(sg);
    67:                 sg = sg_next(sg);
    69:         if (bpl <= sg_dma_len(sg) - offset) {
    73:                 *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
    84:                 done = (sg_dma_len(sg) - offset);
    88:                 *(rp++) = cpu_to_le32(sg_dma_address(sg) ...
    ...
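A sketch of the offset-skip idiom above: advance through the mapped scatterlist until the entry containing byte "offset" is found, then form a DMA address relative to that entry. tw68 writes the result into a RISC program; here it is simply returned. The helper name is hypothetical.

#include <linux/scatterlist.h>

static dma_addr_t sg_addr_at_offset(struct scatterlist *sg,
				    unsigned int offset)
{
	/* skip whole entries that lie entirely before the offset */
	while (offset >= sg_dma_len(sg)) {
		offset -= sg_dma_len(sg);
		sg = sg_next(sg);	/* caller guarantees enough entries */
	}
	return sg_dma_address(sg) + offset;
}
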
/drivers/usb/storage/protocol.c
   139:         struct scatterlist *sg = *sgptr;
   143:         if (sg)
   144:                 nents = sg_nents(sg);
   146:                 sg = scsi_sglist(srb);
   148:         sg_miter_start(&miter, sg, nents, dir == FROM_XFER_BUF ? ...
   163:                 if (*offset + len < miter.piter.sg->length) {
   165:                         *sgptr = miter.piter.sg;
   168:                         *sgptr = sg_next(miter.piter.sg);
   185:         struct scatterlist *sg = NULL;
   188:         buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, ...
    ...
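A sketch of the sg_miter usage above: copy a linear buffer into a scatterlist with the mapping iterator, which handles highmem page mapping internally. The SG_MITER_TO_SG flag corresponds to what protocol.c selects from its FROM_XFER_BUF/TO_XFER_BUF direction; the helper name is illustrative.

#include <linux/scatterlist.h>
#include <linux/string.h>

static size_t copy_buf_to_sg(struct scatterlist *sgl, unsigned int nents,
			     const u8 *buf, size_t buflen)
{
	struct sg_mapping_iter miter;
	size_t done = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (done < buflen && sg_miter_next(&miter)) {
		size_t len = min(miter.length, buflen - done);

		memcpy(miter.addr, buf + done, len);
		done += len;
	}
	sg_miter_stop(&miter);
	return done;	/* bytes actually copied */
}
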
/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
    29:         struct sg_table *sg;
    33:         sg = kzalloc(sizeof(*sg), GFP_KERNEL);
    34:         if (!sg)
    44:         ret = sg_alloc_table(sg, 1, GFP_KERNEL);
    48:         sg_init_table(sg->sgl, 1);
    49:         sg_dma_len(sg->sgl) = obj->size;
    50:         sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
    51:         sg_dma_address(sg->sgl) = paddr;
    56:         return sg;
    62: omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir)
    ...
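A sketch of the single-entry table built above: a physically contiguous buffer needs only one scatterlist entry, and since no IOMMU mapping is performed the DMA address and length are filled in by hand. "paddr" and "size" are assumed to describe the contiguous region; the helper name is hypothetical.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pfn.h>
#include <linux/mm.h>

static struct sg_table *contig_to_sgt(dma_addr_t paddr, size_t size)
{
	struct sg_table *sgt;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	/* one entry covers the whole contiguous buffer */
	sg_set_page(sgt->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	sg_dma_address(sgt->sgl) = paddr;
	sg_dma_len(sgt->sgl) = size;
	return sgt;
}
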
/drivers/gpu/drm/msm/msm_iommu.c
    51:         struct scatterlist *sg;
    59:         for_each_sg(sgt->sgl, sg, sgt->nents, i) {
    60:                 u32 pa = sg_phys(sg) - sg->offset;
    61:                 size_t bytes = sg->length + sg->offset;
    77:         for_each_sg(sgt->sgl, sg, i, j) {
    78:                 size_t bytes = sg->length + sg->offset;
    90:         struct scatterlist *sg;
    ...
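A sketch of the map-with-unwind pattern above: each scatterlist entry is mapped into an IOMMU domain, and on failure the already-mapped prefix is walked again to undo it. It assumes the iommu_map()/iommu_unmap() signatures of the driver's era; "prot" is the caller's protection flags.

#include <linux/scatterlist.h>
#include <linux/iommu.h>

static int map_sgt(struct iommu_domain *domain, unsigned long iova,
		   struct sg_table *sgt, int prot)
{
	unsigned long da = iova;
	struct scatterlist *sg;
	int i, j, ret;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;

		ret = iommu_map(domain, da, sg_phys(sg) - sg->offset,
				bytes, prot);
		if (ret)
			goto fail;
		da += bytes;
	}
	return 0;

fail:
	da = iova;
	for_each_sg(sgt->sgl, sg, i, j) {	/* unwind the mapped prefix */
		size_t bytes = sg->length + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}
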
/drivers/gpu/drm/msm/msm_gem_prime.c
    40: msm_gem_prime_import_sg_table(struct drm_device *dev,
    41:         struct dma_buf_attachment *attach, struct sg_table *sg)
    43:         return msm_gem_import(dev, attach->dmabuf->size, sg);
/drivers/net/wireless/orinoco/mic.c
    51:         struct scatterlist sg[2];
    68:         sg_init_table(sg, 2);
    69:         sg_set_buf(&sg[0], hdr, sizeof(hdr));
    70:         sg_set_buf(&sg[1], data, data_len);
    77:         return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr), ...
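A sketch of the two-entry scatterlist above: a header and a payload buffer are hashed as one stream without copying them into a contiguous buffer. It uses the legacy crypto_hash_digest() API that the driver above still relies on; "tfm" is assumed to be an already-allocated Michael MIC transform, and the function name is illustrative.

#include <linux/scatterlist.h>
#include <linux/crypto.h>

static int mic_over_hdr_and_data(struct crypto_hash *tfm,
				 const u8 *hdr, size_t hdr_len,
				 const u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist sg[2];
	struct hash_desc desc = { .tfm = tfm };

	/* present header + payload to the hash as one logical buffer */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdr, hdr_len);
	sg_set_buf(&sg[1], data, data_len);

	return crypto_hash_digest(&desc, sg, hdr_len + data_len, mic);
}
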
/drivers/s390/scsi/zfcp_qdio.h
   172:  * zfcp_qdio_sg_one_sbale - check if one SBALE is enough for sg data
   173:  * @sg: the scatterlist to check the data size of
   179: int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
   181:         return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
   229:  * @sg: pointer to struct scatterlist
   232: unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
   236:         for (; sg; sg = sg_next(sg))
   247: zfcp_qdio_real_bytes(struct scatterlist *sg)
    ...
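A sketch of the shape of the counting helpers above: walk a (possibly chained) scatterlist with sg_next() to count entries and total bytes, as zfcp_qdio_sbale_count() and zfcp_qdio_real_bytes() do. The names here are generic.

#include <linux/scatterlist.h>

static unsigned int sg_entry_count(struct scatterlist *sg)
{
	unsigned int count = 0;

	for (; sg; sg = sg_next(sg))
		count++;
	return count;
}

static u64 sg_total_bytes(struct scatterlist *sg)
{
	u64 bytes = 0;

	for (; sg; sg = sg_next(sg))
		bytes += sg->length;
	return bytes;
}
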
/drivers/gpu/drm/nouveau/nouveau_sgdma.c
    33:         if (ttm->sg) {
    34:                 node->sg = ttm->sg;
    37:                 node->sg = NULL;
    68:         if (ttm->sg) {
    69:                 node->sg = ttm->sg;
    72:                 node->sg = NULL;
/drivers/infiniband/hw/ipath/ipath_dma.c
   104:         struct scatterlist *sg;
   111:         for_each_sg(sgl, sg, nents, i) {
   112:                 addr = (u64) page_address(sg_page(sg));
   118:                 sg->dma_address = addr + sg->offset;
   120:                 sg->dma_length = sg->length;
   126: ipath_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction)
/drivers/infiniband/hw/qib/qib_dma.c
    97:         struct scatterlist *sg;
   104:         for_each_sg(sgl, sg, nents, i) {
   105:                 addr = (u64) page_address(sg_page(sg));
   111:                 sg->dma_address = addr + sg->offset;
   113:                 sg->dma_length = sg->length;
   119: qib_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction)
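A sketch of the pattern shared by ipath_dma.c and qib_dma.c above: these HCAs access memory through kernel virtual addresses, so "mapping" a scatterlist just records page_address() plus the entry offset instead of a bus address. Highmem pages have no permanent kernel mapping, hence the NULL check; returning 0 mapped entries signals failure, as in the drivers.

#include <linux/scatterlist.h>
#include <linux/mm.h>

static int soft_map_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		void *addr = page_address(sg_page(sg));

		if (!addr)	/* highmem page: no kernel mapping */
			return 0;
		sg->dma_address = (dma_addr_t)(uintptr_t)addr + sg->offset;
	}
	return nents;
}
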
/drivers/infiniband/hw/mthca/mthca_wqe.h
   116: mthca_set_data_seg(struct mthca_data_seg *dseg, struct ib_sge *sg)
   119:         dseg->byte_count = cpu_to_be32(sg->length);
   120:         dseg->lkey = cpu_to_be32(sg->lkey);
   121:         dseg->addr = cpu_to_be64(sg->addr);
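A sketch of how a helper like the one above is typically used: each ib_sge in a work request becomes one hardware data segment, converted to the device's big-endian layout. "struct hw_data_seg" is a hypothetical stand-in for mthca_data_seg.

#include <rdma/ib_verbs.h>

struct hw_data_seg {
	__be32 byte_count;
	__be32 lkey;
	__be64 addr;
};

static void set_data_segs(struct hw_data_seg *dseg,
			  const struct ib_send_wr *wr)
{
	int i;

	/* one data segment per ib_sge in the work request */
	for (i = 0; i < wr->num_sge; i++, dseg++) {
		dseg->byte_count = cpu_to_be32(wr->sg_list[i].length);
		dseg->lkey = cpu_to_be32(wr->sg_list[i].lkey);
		dseg->addr = cpu_to_be64(wr->sg_list[i].addr);
	}
}
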
/drivers/scsi/arm/scsi.h
    23:  * copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
    25:  * (possibly chained) sg-list
    27: static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
    36:         sg_set_buf(sg, SCp->ptr, SCp->this_residual);
    43:                 *(++sg) = *src_sg;
    44:         sg_mark_end(sg);
   100:         struct scatterlist *sg;
   103:         scsi_for_each_sg(SCpnt, sg, sg_count, i)
   104:                 len += sg->length;
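A sketch of the length-summing loop above: scsi_for_each_sg() iterates a command's scatterlist to total the bytes it describes, e.g. to sanity-check against the expected transfer length. The helper name is illustrative.

#include <scsi/scsi_cmnd.h>

static unsigned long scsi_sg_bytes(struct scsi_cmnd *SCpnt)
{
	struct scatterlist *sg;
	unsigned long len = 0;
	int i;

	scsi_for_each_sg(SCpnt, sg, scsi_sg_count(SCpnt), i)
		len += sg->length;
	return len;
}
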
/drivers/target/tcm_fc/tfc_io.c
    66:         struct scatterlist *sg = NULL;
   100:         sg = se_cmd->t_data_sg;
   101:         mem_len = sg->length;
   102:         mem_off = sg->offset;
   103:         page = sg_page(sg);
   118:                 sg = sg_next(sg);
   119:                 mem_len = min((size_t)sg->length, remaining);
   120:                 mem_off = sg->offset;
   121:                 page = sg_page(sg);
   222:         struct scatterlist *sg = NULL;
    ...
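A sketch of the fragment walk above: tfc_io.c consumes a command's data scatterlist fragment by fragment, tracking the current page, the offset within it, and the bytes remaining, so each network frame can be filled from whatever fragment is current. "use_fragment" is a hypothetical consumer callback.

#include <linux/scatterlist.h>

static void walk_sg_fragments(struct scatterlist *sg, size_t remaining,
			      void (*use_fragment)(struct page *page,
						   size_t off, size_t len))
{
	while (remaining && sg) {
		size_t mem_len = min((size_t)sg->length, remaining);

		/* hand the current fragment to the consumer */
		use_fragment(sg_page(sg), sg->offset, mem_len);
		remaining -= mem_len;
		sg = sg_next(sg);
	}
}
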
/drivers/scsi/aacraid/commctrl.c
   560:         if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
   561:                 dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
   562:                         le32_to_cpu(srbcmd->sg.count)));
   567:                 ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
   568:         actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
   574:                 "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
   576:                 actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
   582:         if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
   589:         struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
   590:         struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;
    ...
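A sketch of the validation above: an sg count copied from userspace is bounds-checked before it is used to size the command, since both the driver's bookkeeping array and the FIB payload are fixed-size. Names and the limit are illustrative, not the aacraid ones.

#include <linux/kernel.h>
#include <linux/errno.h>

#define MAX_DRIVER_SG_SEGMENTS 34	/* illustrative limit */

static int check_user_sg_count(u32 sg_count, size_t fib_payload,
			       size_t base_size, size_t entry_size)
{
	if (sg_count > MAX_DRIVER_SG_SEGMENTS)
		return -EINVAL;
	/* reject commands whose declared size disagrees with the count */
	if (base_size + sg_count * entry_size > fib_payload)
		return -EINVAL;
	return 0;
}
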
/drivers/staging/android/ion/ion_chunk_heap.c
    44:         struct scatterlist *sg;
    67:         sg = table->sgl;
    73:                 sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
    75:                 sg = sg_next(sg);
    82:         sg = table->sgl;
    84:                 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
    85:                               sg->length);
    86:                 sg = sg_next(sg);
    99:         struct scatterlist *sg;
    ...
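A sketch of the chunk allocation above: carve fixed-size chunks out of a gen_pool, record each in a scatterlist entry, and on failure walk the filled prefix to return the chunks. "pool" and "chunk_size" are assumed to come from the heap; the table is assumed pre-allocated with nchunks entries.

#include <linux/scatterlist.h>
#include <linux/genalloc.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int alloc_chunks(struct gen_pool *pool, struct sg_table *table,
			unsigned int nchunks, size_t chunk_size)
{
	struct scatterlist *sg = table->sgl;
	unsigned int i;

	for (i = 0; i < nchunks; i++) {
		unsigned long paddr = gen_pool_alloc(pool, chunk_size);

		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), chunk_size, 0);
		sg = sg_next(sg);
	}
	return 0;

err:	/* unwind: free every chunk recorded so far */
	for (sg = table->sgl; i-- > 0; sg = sg_next(sg))
		gen_pool_free(pool, page_to_phys(sg_page(sg)), sg->length);
	return -ENOMEM;
}
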
/drivers/staging/android/ion/ion_heap.c
    31:         struct scatterlist *sg;
    48:         for_each_sg(table->sgl, sg, table->nents, i) {
    49:                 int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
    50:                 struct page *page = sg_page(sg);
    77:         struct scatterlist *sg;
    81:         for_each_sg(table->sgl, sg, table->nents, i) {
    82:                 struct page *page = sg_page(sg);
    84:                 unsigned long len = sg->length;
    86:                 if (offset >= sg->length) {
    87:                         offset -= sg->length;
   156:         struct scatterlist sg;
    ...
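A sketch of the vmap path above: gather every page referenced by an sg_table into a flat array (expanding multi-page entries) and map them into one contiguous kernel virtual range, the way ion_heap_map_kernel() provides buffer-wide CPU access. "npages" is assumed to be the table's total page count.

#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *sgt_vmap(struct sg_table *table, int npages)
{
	struct page **pages, **tmp;
	struct scatterlist *sg;
	void *vaddr;
	int i, j;

	pages = vmalloc(sizeof(struct page *) * npages);
	if (!pages)
		return NULL;
	tmp = pages;

	for_each_sg(table->sgl, sg, table->nents, i) {
		int n = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		for (j = 0; j < n; j++)
			*tmp++ = page + j;	/* pages within an entry are contiguous */
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);	/* the array is only needed while building the mapping */
	return vaddr;
}
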
/drivers/infiniband/hw/mlx5/mem.c
    56:         struct scatterlist *sg;
    66:         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
    67:                 len = sg_dma_len(sg) >> page_shift;
    68:                 pfn = sg_dma_address(sg) >> page_shift;
   121:         struct scatterlist *sg;
   125:         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
   126:                 len = sg_dma_len(sg) >> umem_page_shift;
   127:                 base = sg_dma_address(sg);
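A sketch of the counting loops above: with the umem DMA-mapped, the number of device pages is each entry's DMA length shifted down by the page shift, summed over the list. Field names follow the ib_umem layout of the driver's era; the helper name is illustrative.

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

static int umem_num_pages(struct ib_umem *umem, int page_shift)
{
	struct scatterlist *sg;
	int entry, n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
		n += sg_dma_len(sg) >> page_shift;
	return n;
}
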
/drivers/mmc/card/queue.c
   147:         struct scatterlist *sg;
   149:         sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
   150:         if (!sg)
   154:         sg_init_table(sg, sg_len);
   157:         return sg;
   251:         mqrq_cur->sg = mmc_alloc_sg(1, &ret);
   260:         mqrq_prev->sg = mmc_alloc_sg(1, &ret);
   279:         mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
   284:         mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
   307:         kfree(mqrq_cur->sg);
   449: mmc_queue_packed_map_sg(struct mmc_queue *mq, struct mmc_packed *packed, struct scatterlist *sg, enum mmc_packed_type cmd_type)
   491:         struct scatterlist *sg;
    ...
/drivers/gpu/drm/udl/udl_gem.c
   181:         drm_prime_gem_destroy(gem_obj, obj->sg);
   224: udl_prime_create(struct drm_device *dev, size_t size, struct sg_table *sg, struct udl_gem_object **obj_p)
   239:         obj->sg = sg;
   246:         drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
   256:         struct sg_table *sg;
   270:         sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
   271:         if (IS_ERR(sg)) {
   272:                 ret = PTR_ERR(sg);
   276:         ret = udl_prime_create(dev, dma_buf->size, sg, ...
    ...
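A sketch of the import path above: attach to the dma-buf, map the attachment to get an sg_table, and expand it into a page array for the driver's own bookkeeping. Error unwinding (detaching on failure) is elided for brevity; "npages" is assumed to be dma_buf->size >> PAGE_SHIFT, and the helper name is hypothetical.

#include <linux/dma-buf.h>
#include <drm/drmP.h>

static struct sg_table *import_dmabuf_sgt(struct device *dev,
					  struct dma_buf *dma_buf,
					  struct page **pages, int npages)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return sgt;	/* detach-on-error elided in this sketch */

	/* flatten the sg_table into a struct page array */
	drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);
	return sgt;
}
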
/drivers/gpu/drm/drm_scatter.c
    70:         if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
    72:                 drm_sg_cleanup(dev->sg);
    73:                 dev->sg = NULL;
    97:         if (dev->sg)
   149:         dev->sg = entry;
   210:         entry = dev->sg;
   211:         dev->sg = NULL;
/drivers/mmc/host/tmio_mmc_dma.c
    49:         struct scatterlist *sg = host->sg_ptr, *sg_tmp;
    58:         for_each_sg(sg, sg_tmp, host->sg_len, i) {
    67:         if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
    73:                 if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
    80:                 /* The only sg element can be unaligned, use our bounce buffer then */
    82:                 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
    84:                 sg = host->sg_ptr;
    87:         ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
    89:         desc = dmaengine_prep_slave_sg(chan, sg, ret, ...
   120:         dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[ ...
   126:         struct scatterlist *sg = host->sg_ptr, *sg_tmp;
    ...
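A sketch of the bounce-buffer fallback above: DMA is attempted directly when the scatterlist is aligned; a single unaligned entry is redirected through a driver-owned bounce buffer via sg_init_one(), and other unaligned cases are left to the caller's PIO fallback. Parameter names mirror the tmio host structure but the helper itself is hypothetical.

#include <linux/scatterlist.h>

static struct scatterlist *pick_dma_sg(struct scatterlist *sg,
				       unsigned int sg_len,
				       struct scatterlist *bounce_sg,
				       void *bounce_buf,
				       unsigned int align_mask)
{
	bool aligned = !(sg->offset & align_mask) &&
		       !(sg->length & align_mask);

	if (sg_len == 1 && !aligned) {
		/* redirect the transfer through the bounce buffer */
		sg_init_one(bounce_sg, bounce_buf, sg->length);
		return bounce_sg;
	}
	return sg;	/* caller falls back to PIO for other unaligned cases */
}
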
/drivers/gpu/drm/i915/i915_gem_dmabuf.c
    53:         /* Copy sg so that we make an independent mapping */
    92: i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir)
   100:         dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
   101:         sg_free_table(sg);
   102:         kfree(sg);
   246:         struct sg_table *sg;
   248:         sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
   249:         if (IS_ERR(sg))
   250:                 return PTR_ERR(sg);
    ...
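A sketch of the unmap path above: an exporter's unmap_dma_buf callback undoes exactly what map created, i.e. DMA-unmaps the entries, releases the table's entry storage, and frees the table itself. The function name is illustrative.

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static void example_unmap_dma_buf(struct dma_buf_attachment *attachment,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);	/* releases the entry arrays */
	kfree(sgt);		/* the table struct was kmalloc'ed by map */
}
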
/drivers/infiniband/core/umem.c
    48:         struct scatterlist *sg;
    57:         for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
    59:                 page = sg_page(sg);
    91:         struct scatterlist *sg, *sg_list_start;
   174:         for_each_sg(sg_list_start, sg, ret, i) {
   178:                 sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
   182:         sg_list_start = sg;
   284:         struct scatterlist *sg;
   289:         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
   290:                 n += sg_dma_len(sg) >> shift;
    ...
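A sketch of the pinning loop above: each batch of pages returned by get_user_pages() is recorded into consecutive scatterlist entries, one PAGE_SIZE entry per page, resuming from where the previous batch ended (sg_list_start). "ret" is the number of pages in the batch; the table is assumed large enough for all batches.

#include <linux/scatterlist.h>
#include <linux/mm.h>

static struct scatterlist *
fill_sg_from_pages(struct scatterlist *sg_list_start,
		   struct page **page_list, int ret)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sg_list_start, sg, ret, i)
		sg_set_page(sg, page_list[i], PAGE_SIZE, 0);

	/* sg now points at the next unwritten entry; the next batch continues there */
	return sg;
}
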