/drivers/misc/genwqe/
  card_utils.c
      290   * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
      292   * Allocates memory for sgl and overlapping pages. Pages which might
      297  int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,    [argument]
      303  sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
      304  sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
      305  sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
      306  sgl->lpage_size = (user_size - sgl
      368  genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, dma_addr_t *dma_list)    [argument]
      465  genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)    [argument]
      [all...]
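
The card_utils.c hits above split a user buffer into a partial first page, whole middle pages and a partial last page before the sgl is built. A minimal sketch of that page accounting, using a hypothetical stand-in struct and helper name (the real fields belong to struct genwqe_sgl; the last-page line is truncated in the index and is not reproduced):

    #include <linux/kernel.h>
    #include <linux/mm.h>

    /* Stand-in for the genwqe_sgl fields visible in the hits above. */
    struct sgl_layout {
            unsigned long fpage_offs;   /* offset of the data in the first page */
            size_t fpage_size;          /* bytes of data in the first page */
            unsigned int nr_pages;      /* total pages spanned by the buffer */
    };

    /* Reproduces the arithmetic of the hits at lines 303-305. */
    static void sketch_sgl_page_layout(struct sgl_layout *sgl,
                                       unsigned long user_addr, size_t user_size)
    {
            sgl->fpage_offs = offset_in_page(user_addr);
            sgl->fpage_size = min_t(size_t, PAGE_SIZE - sgl->fpage_offs, user_size);
            sgl->nr_pages   = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
    }
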
/drivers/gpu/drm/exynos/ |
  exynos_drm_dmabuf.c
       59  dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
      100  rd = buf->sgt->sgl;
      101  wr = sgt->sgl;
      109  nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
      111  DRM_ERROR("failed to map sgl with iommu.\n");
      198  struct scatterlist *sgl;    [local]
      244  sgl = sgt->sgl;
      247  buffer->dma_addr = sg_dma_address(sgl);
  exynos_drm_gem.c
       86  struct scatterlist *sgl;    [local]
       98  sgl = buf->sgt->sgl;
       99  for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
      100  if (page_offset < (sgl->length >> PAGE_SHIFT))
      102  page_offset -= (sgl->length >> PAGE_SHIFT);
      105  pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
      486  nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
      488  DRM_ERROR("failed to map sgl wit
      [all...]
/drivers/gpu/drm/omapdrm/ |
  omap_gem_dmabuf.c
       48  sg_init_table(sg->sgl, 1);
       49  sg_dma_len(sg->sgl) = obj->size;
       50  sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
       51  sg_dma_address(sg->sgl) = paddr;
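
The four omap_gem_dmabuf.c hits are the classic way to export one physically contiguous buffer as a single-entry sg_table. A hedged sketch of that pattern (the helper name, the allocation and the no-IOMMU assumption are mine, not the driver's):

    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <linux/pfn.h>
    #include <linux/mm.h>

    static struct sg_table *sketch_single_entry_sgt(phys_addr_t paddr, size_t size)
    {
            struct sg_table *sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);

            if (!sgt || sg_alloc_table(sgt, 1, GFP_KERNEL)) {
                    kfree(sgt);             /* kfree(NULL) is a no-op */
                    return NULL;
            }

            sg_set_page(sgt->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
            /* no IOMMU assumed: the bus address is simply the physical address */
            sg_dma_address(sgt->sgl) = paddr;
            sg_dma_len(sgt->sgl) = size;
            return sgt;
    }
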
/drivers/net/ethernet/intel/ixgbe/ |
  ixgbe_fcoe.h
       63  struct scatterlist *sgl;    [member in struct ixgbe_fcoe_ddp]
  ixgbe_fcoe.c
       53  ddp->sgl = NULL;
      107  if (ddp->sgl)
      108  dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
      124   * @sgl: the scatter-gather list for this request
      130  struct scatterlist *sgl, unsigned int sgc,
      149  if (!netdev || !sgl)
      165  if (ddp->sgl) {
      166  e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
      167  xid, ddp->sgl, ddp->sgc);
      184  /* setup dma from scsi command sgl */
      129  ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc, int target_mode)    [argument]
      324  ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc)    [argument]
      345  ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc)    [argument]
      [all...]
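
The ixgbe_fcoe.c hits at lines 53, 107-108 and 165-167 outline the DDP context life cycle: a context whose sgl is non-NULL is still mapped and must be unmapped before reuse. A small sketch of that teardown, with a hypothetical stand-in for struct ixgbe_fcoe_ddp:

    #include <linux/scatterlist.h>
    #include <linux/dma-mapping.h>

    struct sketch_ddp {                     /* stand-in for struct ixgbe_fcoe_ddp */
            struct scatterlist *sgl;
            unsigned int sgc;
    };

    static void sketch_ddp_put(struct device *dev, struct sketch_ddp *ddp,
                               enum dma_data_direction dir)
    {
            if (ddp->sgl) {
                    dma_unmap_sg(dev, ddp->sgl, ddp->sgc, dir);
                    ddp->sgl = NULL;        /* mark the exchange as free again */
                    ddp->sgc = 0;
            }
    }
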
/drivers/staging/android/ion/ |
  ion_carveout_heap.c
       64  struct page *page = sg_page(table->sgl);
       97  sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
      113  struct page *page = sg_page(table->sgl);
      119  dma_sync_sg_for_device(NULL, table->sgl, table->nents,
  ion_chunk_heap.c
       67  sg = table->sgl;
       82  sg = table->sgl;
      108  dma_sync_sg_for_device(NULL, table->sgl, table->nents,
      111  for_each_sg(table->sgl, sg, table->nents, i) {
/drivers/media/platform/ |
  m2m-deinterlace.c
      257  ctx->xt->sgl[0].size = s_width;
      258  ctx->xt->sgl[0].icg = s_width;
      264  ctx->xt->sgl[0].size = s_width;
      265  ctx->xt->sgl[0].icg = s_width;
      271  ctx->xt->sgl[0].size = s_width / 2;
      272  ctx->xt->sgl[0].icg = s_width / 2;
      278  ctx->xt->sgl[0].size = s_width / 2;
      279  ctx->xt->sgl[0].icg = s_width / 2;
      285  ctx->xt->sgl[0].size = s_width / 2;
      286  ctx->xt->sgl[
      [all...]
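
The m2m-deinterlace hits program a dmaengine interleaved transfer: each chunk copies size bytes and then skips icg bytes, which is how every other video line is fetched or dropped. A sketch of that configuration, assuming the caller allocated a template with room for one chunk (struct dma_interleaved_template and struct data_chunk are the standard dmaengine types):

    #include <linux/dmaengine.h>

    static void sketch_deinterlace_template(struct dma_interleaved_template *xt,
                                            size_t line_bytes, size_t lines)
    {
            xt->numf = lines;               /* number of "frames" (lines) to move */
            xt->frame_size = 1;             /* one chunk per frame */
            xt->sgl[0].size = line_bytes;   /* copy one line ...            */
            xt->sgl[0].icg  = line_bytes;   /* ... then skip the next one   */
    }
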
/drivers/media/v4l2-core/ |
  videobuf2-dma-contig.c
       59  for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
       73  dma_addr_t expected = sg_dma_address(sgt->sgl);
       77  for_each_sg(sgt->sgl, s, sgt->nents, i) {
      123  dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
      135  dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
      259  rd = buf->sgt_base->sgl;
      260  wr = sgt->sgl;
      286  dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
      313  dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
      319  ret = dma_map_sg(db_attach->dev, sgt->sgl, sg
      [all...]
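
Lines 73 and 77 of videobuf2-dma-contig.c hint at how a mapped sg_table is verified to be contiguous in DMA address space. A sketch of that check (the helper name is mine):

    #include <linux/scatterlist.h>
    #include <linux/dma-mapping.h>

    static bool sketch_sgt_is_dma_contiguous(struct sg_table *sgt)
    {
            struct scatterlist *s;
            dma_addr_t expected = sg_dma_address(sgt->sgl);
            unsigned int i;

            for_each_sg(sgt->sgl, s, sgt->nents, i) {
                    if (sg_dma_address(s) != expected)
                            return false;           /* gap or reordering */
                    expected += sg_dma_len(s);
            }
            return true;
    }
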
/drivers/scsi/esas2r/ |
  esas2r_io.c
      224  struct esas2r_mem_desc *sgl;    [local]
      231  sgl = esas2r_alloc_sgl(a);
      233  if (unlikely(sgl == NULL))
      244  memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
      248  (struct atto_vda_sge *)((u8 *)sgl->virt_addr +
      253  (struct atto_vda_sge *)((u8 *)sgl->virt_addr
      260  cpu_to_le64(sgl->phys_addr);
      302  list_add(&sgl->next_desc, &rq->sg_table_head);
      376  struct esas2r_mem_desc *sgl;    [local]
      449  sgl
      [all...]
/drivers/gpu/drm/i915/ |
  i915_gem_dmabuf.c
       64  src = obj->pages->sgl;
       65  dst = st->sgl;
       72  if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
      100  dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
      139  for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
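
The i915_gem_dmabuf.c hits (src/dst at 64-65, dma_map_sg at 72) follow the usual dma-buf map_dma_buf pattern: clone the exporter's page list into a new sg_table, then map it for the importing device. A trimmed sketch under those assumptions:

    #include <linux/scatterlist.h>
    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    static int sketch_clone_and_map_sgt(struct device *dev,
                                        struct sg_table *src_table,
                                        struct sg_table *dst_table,
                                        enum dma_data_direction dir)
    {
            struct scatterlist *src, *dst;
            unsigned int i;

            if (sg_alloc_table(dst_table, src_table->nents, GFP_KERNEL))
                    return -ENOMEM;

            src = src_table->sgl;
            dst = dst_table->sgl;
            for (i = 0; i < src_table->nents; i++) {
                    sg_set_page(dst, sg_page(src), src->length, src->offset);
                    src = sg_next(src);
                    dst = sg_next(dst);
            }

            if (!dma_map_sg(dev, dst_table->sgl, dst_table->nents, dir)) {
                    sg_free_table(dst_table);
                    return -ENOMEM;
            }
            return 0;
    }
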
/drivers/infiniband/core/ |
  umem.c
       53  ib_dma_unmap_sg(dev, umem->sg_head.sgl,
       57  for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
      159  sg_list_start = umem->sg_head.sgl;
      186  umem->sg_head.sgl,
      289  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
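
The umem.c hits show the teardown half of a pinned user memory region: unmap the sg list from the device, then release every page, dirtying it if the mapping was writable. A hedged sketch of that path (the real ib_umem code also handles accounting and hugetlb details):

    #include <linux/scatterlist.h>
    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    static void sketch_release_pinned_sgt(struct device *dev, struct sg_table *sgt,
                                          int npages, bool writable,
                                          enum dma_data_direction dir)
    {
            struct scatterlist *sg;
            int i;

            dma_unmap_sg(dev, sgt->sgl, sgt->nents, dir);

            for_each_sg(sgt->sgl, sg, npages, i) {
                    struct page *page = sg_page(sg);

                    if (writable)
                            set_page_dirty_lock(page);
                    put_page(page);
            }
    }
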
/drivers/infiniband/hw/cxgb3/ |
  iwch_qp.c
       77  wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
       78  wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
       79  wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
      103  wqe->write.sgl[0].stag = wr->ex.imm_data;
      104  wqe->write.sgl[0].len = cpu_to_be32(0);
      114  wqe->write.sgl[i].stag =
      116  wqe->write.sgl[i].len =
      118  wqe->write.sgl[i].to =
      264  wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
      265  wqe->recv.sgl[
      540  struct ib_sge sgl;    [local]
      [all...]
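
The iwch_qp.c hits at lines 77-79 and 264-265 copy the caller's struct ib_sge entries into the adapter's big-endian SGL descriptors. A sketch of that translation, using a hypothetical hw_sge layout in place of the cxgb3-specific type:

    #include <rdma/ib_verbs.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct hw_sge {                 /* hypothetical stand-in for the adapter SGE */
            __be32 stag;
            __be32 len;
            __be64 to;
    };

    static void sketch_build_hw_sgl(struct hw_sge *sgl,
                                    const struct ib_sge *sg_list, int num_sge)
    {
            int i;

            for (i = 0; i < num_sge; i++) {
                    sgl[i].stag = cpu_to_be32(sg_list[i].lkey);
                    sgl[i].len  = cpu_to_be32(sg_list[i].length);
                    sgl[i].to   = cpu_to_be64(sg_list[i].addr);
            }
    }
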
/drivers/scsi/lpfc/ |
  lpfc_scsi.c
      182  struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;    [local]
      183  if (sgl) {
      184  sgl += 1;
      185  sgl->word2 = le32_to_cpu(sgl->word2);
      186  bf_set(lpfc_sli4_sge_last, sgl, 1);
      187  sgl->word2 = cpu_to_le32(sgl->word2);
      740   * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
      771  /* a hole in xri block, form a sgl postin
      917  struct sli4_sge *sgl;    [local]
     2381  lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datasegcnt)    [argument]
     2518  lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datacnt, int protcnt)    [argument]
     3358  struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;    [local]
     3488  struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);    [local]
      [all...]
/drivers/gpu/drm/msm/ |
  msm_iommu.c
       59  for_each_sg(sgt->sgl, sg, sgt->nents, i) {
       77  for_each_sg(sgt->sgl, sg, i, j) {
       94  for_each_sg(sgt->sgl, sg, sgt->nents, i) {
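
The msm_iommu.c hits all walk an sg_table with for_each_sg(), typically to map each chunk into an IOMMU domain at an incrementing IOVA. A sketch of that loop; the five-argument iommu_map() prototype matches kernels of this vintage (newer ones add a gfp argument), so treat it as an assumption:

    #include <linux/scatterlist.h>
    #include <linux/iommu.h>

    static int sketch_iommu_map_sgt(struct iommu_domain *domain, unsigned long iova,
                                    struct sg_table *sgt, int prot)
    {
            struct scatterlist *sg;
            unsigned long da = iova;
            unsigned int i;
            int ret;

            for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                    ret = iommu_map(domain, da, sg_phys(sg), sg->length, prot);
                    if (ret)
                            return ret;     /* caller unmaps whatever was mapped */
                    da += sg->length;
            }
            return 0;
    }
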
/drivers/video/adf/ |
  adf_memblock.c
       41  sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
       43  nents = dma_map_sg(attach->dev, table->sgl, 1, direction);
       61  dma_unmap_sg(attach->dev, table->sgl, 1, direction);
/drivers/infiniband/hw/ipath/ |
  ipath_dma.c
      101  static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,    [argument]
      111  for_each_sg(sgl, sg, nents, i) {
/drivers/infiniband/hw/qib/ |
  qib_dma.c
       94  static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,    [argument]
      104  for_each_sg(sgl, sg, nents, i) {
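
ipath_map_sg() and qib_map_sg() above implement a software "DMA mapping" for HCAs that reach memory through the CPU: each entry's dma_address is derived directly from the page's kernel virtual address, and a highmem page (no direct mapping) fails the whole map. A hedged sketch of that idea, not the drivers' exact code:

    #include <linux/scatterlist.h>
    #include <linux/mm.h>

    static int sketch_sw_map_sg(struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sgl, sg, nents, i) {
                    void *va = page_address(sg_page(sg));

                    if (!va)
                            return 0;       /* 0 entries mapped == failure */
                    sg->dma_address = (dma_addr_t)(unsigned long)va + sg->offset;
            }
            return nents;
    }
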
/drivers/gpu/drm/armada/ |
  armada_gem.c
      444  for_each_sg(sgt->sgl, sg, count, i) {
      456  if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
      465  sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
      467  if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
      473  sg_dma_address(sgt->sgl) = dobj->dev_addr;
      474  sg_dma_len(sgt->sgl) = dobj->obj.size;
      481  for_each_sg(sgt->sgl, sg, num, i)
      498  dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
      502  for_each_sg(sgt->sgl, sg, sgt->nents, i)
      604  if (sg_dma_len(dobj->sgt->sgl) < dob
      [all...]
/drivers/infiniband/hw/mlx4/ |
  doorbell.c
       75  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
/drivers/infiniband/hw/mlx5/ |
  doorbell.c
       77  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
/drivers/infiniband/ulp/iser/ |
  iser_memory.c
       53  struct scatterlist *sgl = (struct scatterlist *)data->buf;    [local]
       59  for_each_sg(sgl, sg, data->size, i)
       79  sgl = (struct scatterlist *)data->buf;
       81  for_each_sg(sgl, sg, data->size, i) {
      127  struct scatterlist *sgl, *sg;    [local]
      135  sgl = (struct scatterlist *)data->buf;
      139  for_each_sg(sgl, sg, sg_size, i) {
      179  struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;    [local]
      186  *offset = (u64) sgl[0].offset & ~MASK_4K;
      190  for_each_sg(sgl, s
      231  struct scatterlist *sgl, *sg, *next_sg = NULL;    [local]
      270  struct scatterlist *sgl = (struct scatterlist *)data->buf;    [local]
      [all...]
/drivers/media/pci/saa7134/ |
  saa7134-vbi.c
      125  if (dma->sgl->offset) {
      135  ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
      138  return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
      179  dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
/drivers/net/ethernet/intel/i40e/ |
  i40e_fcoe.h
      110  struct scatterlist *sgl;    [member in struct i40e_fcoe_ddp]