Searched refs:sgl (Results 76 - 100 of 211) sorted by relevance


/drivers/mtd/nand/gpmi-nand/
gpmi-lib.c
    1123: struct scatterlist *sgl;  [local]
    1143: sgl = &this->cmd_sgl;
    1145: sg_init_one(sgl, this->cmd_buffer, this->command_length);
    1146: dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
    1148: sgl, 1, DMA_MEM_TO_DEV,
gpmi-nand.c
    384: struct scatterlist *sgl = &this->data_sgl;  [local]
    390: sg_init_one(sgl, this->upper_buf, this->upper_len);
    391: ret = dma_map_sg(this->dev, sgl, 1, dr);
    401: sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
    406: dma_map_sg(this->dev, sgl, 1, dr);
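The two gpmi files above show the canonical single-entry scatterlist flow: sg_init_one() wraps a plain buffer, dma_map_sg() makes it visible to the device, and dmaengine_prep_slave_sg() turns the mapped entry into a transfer descriptor. A minimal sketch of that flow; the demo_dev context and demo_send name are illustrative, not taken from the hits:

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Hypothetical driver context; only the scatterlist flow matters. */
    struct demo_dev {
            struct device *dev;
            struct dma_chan *chan;
            struct scatterlist sgl;
    };

    static int demo_send(struct demo_dev *d, void *buf, size_t len)
    {
            struct dma_async_tx_descriptor *desc;

            /* Build a one-entry table over the buffer. */
            sg_init_one(&d->sgl, buf, len);
            if (!dma_map_sg(d->dev, &d->sgl, 1, DMA_TO_DEVICE))
                    return -ENOMEM;

            /* Memory-to-device slave transfer over the mapped entry. */
            desc = dmaengine_prep_slave_sg(d->chan, &d->sgl, 1,
                                           DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!desc) {
                    dma_unmap_sg(d->dev, &d->sgl, 1, DMA_TO_DEVICE);
                    return -EINVAL;
            }

            dmaengine_submit(desc);
            dma_async_issue_pending(d->chan);
            return 0;
    }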
/drivers/spi/
spi-ep93xx.c
    493: for_each_sg(sgt->sgl, sg, sgt->nents, i) {
    513: nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
    517: txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
    519: dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
    547: dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
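These hits illustrate the defensive shape around dma_map_sg(): the prep call must use the entry count the mapping returned (an IOMMU may coalesce entries, so it can be smaller than sgt->nents), while the unmap takes the count that was passed in. A sketch of that map/prep/unmap-on-failure pattern; demo_prep is an illustrative name and the direction handling is simplified:

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static struct dma_async_tx_descriptor *
    demo_prep(struct dma_chan *chan, struct sg_table *sgt,
              enum dma_transfer_direction dir)
    {
            enum dma_data_direction map_dir =
                    dir == DMA_MEM_TO_DEV ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
            struct dma_async_tx_descriptor *txd;
            int nents;

            nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, map_dir);
            if (!nents)
                    return NULL;

            /* Prep with the mapped count; unmap with the original one. */
            txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
                                          DMA_CTRL_ACK);
            if (!txd)
                    dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents,
                                 map_dir);
            return txd;
    }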
/drivers/scsi/be2iscsi/
be_main.c
    3164: be_sgl_create_contiguous(void *virtual_address, u64 physical_address, u32 length, struct be_dma_mem *sgl)  [argument]
    3171: WARN_ON(!sgl);
    3173: sgl->va = virtual_address;
    3174: sgl->dma = (unsigned long)physical_address;
    3175: sgl->size = length;
    3180: static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)  [argument]
    3182: memset(sgl, 0, sizeof(*sgl));
    3186: hwi_build_be_sgl_arr(struct beiscsi_hba *phba, struct mem_array *pmem, struct be_dma_mem *sgl)  [argument]
    3189: if (sgl …
    3198: hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, struct mem_array *pmem, struct be_dma_mem *sgl)  [argument]
    3469: struct be_dma_mem sgl;  [local]
    3504: struct be_dma_mem sgl;  [local]
    3570: struct be_dma_mem sgl;  [local]
    [more matches in this file]
/drivers/gpu/drm/
drm_prime.c
    156: dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
    204: if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
    716: for_each_sg(sgt->sgl, sg, sgt->nents, count) {
/drivers/gpu/drm/vmwgfx/
vmwgfx_buffer.c
    316: __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
    336: dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
    359: ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
/drivers/scsi/megaraid/
megaraid_mbox.c
    1375: struct scatterlist *sgl;  [local]
    1394: scsi_for_each_sg(scp, sgl, sgcnt, i) {
    1395: ccb->sgl64[i].address = sg_dma_address(sgl);
    1396: ccb->sgl64[i].length = sg_dma_len(sgl);
    1584: struct scatterlist *sgl;  [local]
    1587: sgl = scsi_sglist(scp);
    1588: if (sg_page(sgl)) {
    1589: vaddr = (caddr_t) sg_virt(&sgl[0]);
    2245: struct scatterlist *sgl;  [local]
    2329: sgl …
    [more matches in this file]
/drivers/scsi/esas2r/
esas2r_init.c
    840: struct esas2r_mem_desc *sgl;  [local]
    908: for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
    909: sgl->size = sgl_page_size;
    911: list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
    913: if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
/drivers/scsi/
3w-9xxx.h
    506: TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];  [member in struct TW_Command::__anon5039::__anon5040]
    510: TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];  [member in struct TW_Command::__anon5039::__anon5041]
3w-sas.c
    175: "Last sgl length: %4d\n"
    176: "Max sgl length: %4d\n"
    459: command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
    460: command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
    719: TW_SG_Entry_ISO *sgl;  [local]
    741: sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
    742: sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
    743: sgl->length = TW_CPU_TO_SGL(length);
    987: command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
    988: command_packet->byte8_offset.param.sgl[ …
    [more matches in this file]
scsi_lib.c
    578: static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)  [argument]
    583: mempool_free(sgl, sgp->pool);
    612: sg_init_table(sdb->table.sgl, sdb->table.nents);
    615: first_chunk = sdb->table.sgl;
    1091: count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
    1153: prot_sdb->table.sgl);
    1828: cmd->sdb.table.sgl = sg;
    1835: cmd->prot_sdb->table.sgl =
    1844: bidi_sdb->table.sgl =
    2972: * @sgl …
    2979: scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, size_t *offset, size_t *len)  [argument]
    [more matches in this file]
3w-9xxx.c
    64: 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
    171: "Last sgl length: %4d\n"
    172: "Max sgl length: %4d\n"
    494: command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
    495: command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
    1081: command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
    1082: command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
    1134: /* Turn on 64-bit sgl support if we need to */
    1348: /* Report residual bytes for single sgl */
    1383: TW_SG_Entry *sgl;  [local]
    [more matches in this file]
/drivers/xen/
xen-scsiback.c
    134: struct scatterlist *sgl;  [member in struct vscsibk_pend]
    291: kfree(req->sgl);
    292: req->sgl = NULL;
    412: pending_req->sgl, pending_req->n_sg,
    528: /* free of (sgl) in fast_flush_area() */
    529: pending_req->sgl = kmalloc_array(nr_segments,
    531: if (!pending_req->sgl)
    534: sg_init_table(pending_req->sgl, nr_segments);
    568: for_each_sg(pending_req->sgl, sg, nr_segments, i) {
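xen-scsiback builds its scatterlist at runtime: allocate the entries with kmalloc_array(), initialise them with sg_init_table() (which also marks the end of the list), then point each entry at a page. A condensed sketch of that shape; demo_build_sgl and its parameters are illustrative, and error paths are trimmed:

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct scatterlist *demo_build_sgl(struct page **pages,
                                              unsigned int nr_segments)
    {
            struct scatterlist *sgl, *sg;
            unsigned int i;

            sgl = kmalloc_array(nr_segments, sizeof(*sgl), GFP_KERNEL);
            if (!sgl)
                    return NULL;

            /* Zeroes every entry and sets the end marker on the last. */
            sg_init_table(sgl, nr_segments);

            /* One page per segment; real code also derives each entry's
             * length and offset from the request it is translating. */
            for_each_sg(sgl, sg, nr_segments, i)
                    sg_set_page(sg, pages[i], PAGE_SIZE, 0);

            return sgl;
    }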
/drivers/base/
dma-mapping.c
    238: sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
/drivers/block/
nvme-scsi.c
    378: struct sg_iovec sgl;  [local]
    381: not_copied = copy_from_user(&sgl, hdr->dxferp +
    386: xfer_len = min(remaining, sgl.iov_len);
    387: not_copied = copy_to_user(sgl.iov_base, index,
    419: struct sg_iovec sgl;  [local]
    422: not_copied = copy_from_user(&sgl, hdr->dxferp +
    427: xfer_len = min(remaining, sgl.iov_len);
    428: not_copied = copy_from_user(index, sgl.iov_base,
    2062: struct sg_iovec sgl;  [local]
    2064: retcode = copy_from_user(&sgl, hd …
    2139: struct sg_iovec sgl;  [local]
    [more matches in this file]
/drivers/dma/
k3dma.c
    469: k3_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen, enum dma_transfer_direction dir, unsigned long flags, void *context)  [argument]
    480: if (sgl == NULL)
    483: for_each_sg(sgl, sg, sglen, i) {
    498: for_each_sg(sgl, sg, sglen, i) {
mmp_pdma.c
    523: mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, …  [argument]
    534: if ((sgl == NULL) || (sg_len == 0))
    539: for_each_sg(sgl, sg, sg_len, i) {
    541: avail = sg_dma_len(sgl);
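k3dma and mmp_pdma show the consumer side inside a dmaengine driver: the prep_slave_sg callback validates its arguments, then walks the already-mapped list with for_each_sg(), reading each segment's DMA address and length. A skeleton of that callback, with all descriptor bookkeeping elided and names illustrative:

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static struct dma_async_tx_descriptor *
    demo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, enum dma_transfer_direction dir,
                       unsigned long flags, void *context)
    {
            struct scatterlist *sg;
            unsigned int i;

            if (!sgl || !sg_len)
                    return NULL;

            for_each_sg(sgl, sg, sg_len, i) {
                    dma_addr_t addr = sg_dma_address(sg);
                    u32 avail = sg_dma_len(sg);

                    /* ...program one hardware descriptor per segment... */
                    (void)addr;
                    (void)avail;
            }

            return NULL;    /* a real driver returns its descriptor here */
    }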
/drivers/mmc/core/
sdio_ops.c
    165: data.sg = sgtable.sgl;
/drivers/staging/android/ion/
ion_cma_heap.c
    57: sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
ion_test.c
    66: for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
/drivers/crypto/
omap-sham.c
    153: struct scatterlist sgl;  [member in struct omap_sham_reqctx]
    580: * set correctly so use a local SG entry (sgl) with the
    584: sg_init_table(&ctx->sgl, 1);
    585: ctx->sgl.page_link = ctx->sg->page_link;
    586: ctx->sgl.offset = ctx->sg->offset;
    587: sg_dma_len(&ctx->sgl) = len32;
    588: sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
    590: tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
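The omap-sham hits capture a useful trick: when only part of the first chain entry should be transferred, the driver clones that entry into a private one-entry list and clamps its DMA length, leaving the original list untouched. A self-contained rendering of the idea; demo_prep_clamped is an illustrative wrapper, not the driver's function:

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static struct dma_async_tx_descriptor *
    demo_prep_clamped(struct dma_chan *chan, struct scatterlist *src,
                      struct scatterlist *local, unsigned int len)
    {
            /* Clone the already-mapped head entry into our own entry. */
            sg_init_table(local, 1);
            local->page_link = src->page_link;
            local->offset = src->offset;

            /* Writable because the entry is ours; the source entry keeps
             * its full length. */
            sg_dma_len(local) = len;
            sg_dma_address(local) = sg_dma_address(src);

            return dmaengine_prep_slave_sg(chan, local, 1, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    }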
/drivers/media/pci/solo6x10/
solo6x10-v4l2-enc.c
    333: for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
    476: /* may discard all previous data in vbuf->sgl */
    477: if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
    484: dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
    488: sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
    524: /* may discard all previous data in vbuf->sgl */
    525: if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
    531: dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
    536: sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
/drivers/hsi/clients/
ssi_protocol.c
    167: data = sg_virt(msg->sgt.sgl);
    175: data = sg_virt(msg->sgt.sgl);
    188: sg = msg->sgt.sgl;
    258: kfree(sg_virt(msg->sgt.sgl));
    278: sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
/drivers/gpu/drm/i915/
i915_gem_gtt.c
    304: for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
    879: for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
    1365: obj->pages->sgl, obj->pages->nents,
    1395: for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
    1441: for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
    1630: obj->pages->sgl, obj->pages->nents,
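i915 (like ion_test.c above) walks its tables page by page rather than segment by segment: for_each_sg_page() hides segment boundaries and yields one page per iteration, the natural granularity for filling GPU page tables. A brief sketch; demo_walk_pages is an illustrative name:

    #include <linux/scatterlist.h>

    static void demo_walk_pages(struct sg_table *st)
    {
            struct sg_page_iter sg_iter;

            /* Final argument is the starting page offset into the table. */
            for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                    struct page *page = sg_page_iter_page(&sg_iter);

                    /* ...insert one PTE per page, as a GTT bind would... */
                    (void)page;
            }
    }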
/drivers/net/ethernet/chelsio/cxgb3/
sge.c
    1037: * @sgl: the SGL
    1049: write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, struct tx_desc *d, unsigned int pidx, const struct sge_txq *q, const struct sg_ent *sgl, unsigned int flits, unsigned int sgl_flits, unsigned int gen, __be32 wr_hi, __be32 wr_lo)  [argument]
    1077: const u64 *fp = (const u64 *)sgl;
    1144: struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];  [local]
    1198: sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
    1201: write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
    1550: setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, const struct sg_ent *sgl, int sgl_flits)  [argument]
    1558: for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
    1559: *p++ = be64_to_cpu(sgl->addr[0]);
    1560: *p++ = be64_to_cpu(sgl …
    1584: struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];  [local]
    [more matches in this file]

Completed in 1683 milliseconds
