/drivers/iommu/
iommu-traces.c
     24  EXPORT_TRACEPOINT_SYMBOL_GPL(unmap); variable
/drivers/gpu/drm/nouveau/core/include/subdev/
bar.h
     20  void (*unmap)(struct nouveau_bar *, struct nouveau_vma *); member in struct:nouveau_bar
vm.h
     87  void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); member in struct:nouveau_vmmgr
/drivers/gpu/drm/nouveau/nvif/
driver.h
     13  void (*unmap)(void *priv, void __iomem *ptr, u32 size); member in struct:nvif_driver
/drivers/dma/
dmaengine.c
    962  #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
    993  struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); local
    994  struct device *dev = unmap->dev;
    997  cnt = unmap->to_cnt;
    999  dma_unmap_page(dev, unmap->addr[i], unmap->len,
   1001  cnt += unmap->from_cnt;
   1003  dma_unmap_page(dev, unmap->addr[i], unmap
   1016  dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) argument
   1069  struct dmaengine_unmap_data *unmap; local
   [all...]
mv_xor.c
    693  struct dmaengine_unmap_data *unmap; local
    716  unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
    717  if (!unmap) {
    724  unmap->addr[0] = src_dma;
    731  unmap->to_cnt = 1;
    735  unmap->addr[1] = dest_dma;
    742  unmap->from_cnt = 1;
    743  unmap->len = PAGE_SIZE;
    784  dmaengine_unmap_put(unmap);
    802  struct dmaengine_unmap_data *unmap; local
    [all...]
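The dmaengine hits above all follow the same reference-counted unmap-data pattern: allocate a struct dmaengine_unmap_data sized for the transfer, record each mapped page in addr[] along with len, to_cnt and from_cnt, and drop the reference with dmaengine_unmap_put() so the core undoes the mappings when the descriptor is done. The ntb_transport.c hits further down follow the same flow. Below is a minimal sketch of that flow, loosely modelled on the mv_xor.c snippet; the channel and the source/destination pages are placeholders, and the descriptor setup itself is elided.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Sketch only: 'chan', 'src_page' and 'dst_page' are assumed to exist. */
static int sketch_map_one_copy(struct dma_chan *chan,
                               struct page *src_page, struct page *dst_page)
{
        struct device *dev = chan->device->dev;
        struct dmaengine_unmap_data *unmap;

        /* Room for two addresses: one mapped to-device, one from-device. */
        unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
        if (!unmap)
                return -ENOMEM;

        unmap->len = PAGE_SIZE;

        unmap->addr[0] = dma_map_page(dev, src_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, unmap->addr[0]))
                goto err;
        unmap->to_cnt = 1;

        unmap->addr[1] = dma_map_page(dev, dst_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, unmap->addr[1]))
                goto err;
        unmap->from_cnt = 1;

        /* ... prepare and submit the DMA descriptor here ... */

        dmaengine_unmap_put(unmap);     /* release this caller's reference */
        return 0;

err:
        /* Put also unmaps whatever was recorded in addr[] so far. */
        dmaengine_unmap_put(unmap);
        return -ENOMEM;                 /* simplified error code for the sketch */
}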
/drivers/acpi/
nvs.c
     79  bool unmap; member in struct:nvs_page
    138  if (entry->unmap) {
    140  entry->unmap = false;
    184  entry->unmap = !!entry->kaddr;
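The nvs.c hits show a small bookkeeping idiom: each nvs_page entry records in a bool unmap whether kaddr holds a mapping that this code created and must therefore tear down later (entry->unmap = !!entry->kaddr). A generic sketch of that bookkeeping follows, with ioremap()/iounmap() standing in for the ACPI mapping helpers the real file uses and an abbreviated structure in place of struct nvs_page.

#include <linux/io.h>
#include <linux/types.h>

/* Abbreviated stand-in for the real struct nvs_page. */
struct nvs_page_sketch {
        phys_addr_t      phys_start;
        void __iomem    *kaddr;
        bool             unmap;         /* true if kaddr must be unmapped here */
};

static void nvs_page_sketch_map(struct nvs_page_sketch *entry, size_t size)
{
        entry->kaddr = ioremap(entry->phys_start, size);
        /* Only remember to unmap what this code actually mapped. */
        entry->unmap = !!entry->kaddr;
}

static void nvs_page_sketch_free(struct nvs_page_sketch *entry)
{
        if (entry->unmap) {
                iounmap(entry->kaddr);
                entry->unmap = false;
        }
}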
/drivers/gpu/drm/msm/
msm_mmu.h
     28  int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, member in struct:msm_mmu_funcs
msm_iommu.c
    102  VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
    123  .unmap = msm_iommu_unmap,
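The msm entries show the common kernel pattern of exposing unmap as a function pointer in a per-implementation ops table (struct msm_mmu_funcs), with the backend filling it in (.unmap = msm_iommu_unmap). The nouveau bar.h, vm.h and nvif/driver.h hits above have the same shape. Here is a stripped-down sketch of the pattern with invented names; it is not the actual msm structures.

#include <linux/types.h>

struct sketch_mmu;

/* Per-backend operations; each MMU implementation supplies its own unmap. */
struct sketch_mmu_funcs {
        int (*map)(struct sketch_mmu *mmu, u32 iova, size_t len);
        int (*unmap)(struct sketch_mmu *mmu, u32 iova, size_t len);
};

struct sketch_mmu {
        const struct sketch_mmu_funcs *funcs;
};

static int sketch_iommu_unmap(struct sketch_mmu *mmu, u32 iova, size_t len)
{
        /* Backend-specific page-table teardown would go here. */
        return 0;
}

static const struct sketch_mmu_funcs sketch_iommu_funcs = {
        .unmap = sketch_iommu_unmap,
};

/* Callers always go through the table, never the backend directly. */
static int sketch_mmu_unmap(struct sketch_mmu *mmu, u32 iova, size_t len)
{
        return mmu->funcs->unmap(mmu, iova, len);
}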
/drivers/net/ethernet/brocade/bna/
bnad.c
    118  struct bnad_tx_unmap *unmap; local
    122  unmap = &unmap_q[index];
    123  nvecs = unmap->nvecs;
    125  skb = unmap->skb;
    126  unmap->skb = NULL;
    127  unmap->nvecs = 0;
    129  dma_unmap_addr(&unmap->vectors[0], dma_addr),
    131  dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
    140  unmap = &unmap_q[index];
    144  dma_unmap_addr(&unmap
    189  struct bnad_tx_unmap *unmap; local
    316  bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) argument
    331  bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) argument
    352  struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; local
    367  struct bnad_rx_unmap *unmap, *prev; local
    435  struct bnad_rx_unmap *unmap; local
    514  struct bnad_rx_unmap *unmap; local
    536  struct bnad_rx_unmap *unmap; local
    570  bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, struct bnad_rx_unmap *unmap, u32 len) argument
    592  struct bnad_rx_unmap *unmap = NULL; local
   2936  struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap; local
    [all...]
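bnad.c keeps per-descriptor unmap state (bnad_tx_unmap / bnad_rx_unmap) and, when a ring is cleaned, reads the saved DMA address back through dma_unmap_addr()/dma_unmap_addr_set(). A reduced sketch of that bookkeeping follows; it assumes a made-up single-fragment descriptor rather than the real bnad structures, which keep an array of vectors.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical one-fragment TX unmap slot, a stand-in for bnad_tx_unmap. */
struct sketch_tx_unmap {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);
        DEFINE_DMA_UNMAP_LEN(dma_len);
};

static void sketch_tx_unmap_one(struct device *dev, struct sketch_tx_unmap *unmap)
{
        dma_unmap_single(dev,
                         dma_unmap_addr(unmap, dma_addr),
                         dma_unmap_len(unmap, dma_len),
                         DMA_TO_DEVICE);
        /* Clear the slot so a second cleanup pass is a no-op. */
        dma_unmap_addr_set(unmap, dma_addr, 0);
        dev_kfree_skb_any(unmap->skb);
        unmap->skb = NULL;
}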
/drivers/vfio/
vfio_iommu_type1.c
    396  struct vfio_iommu_type1_dma_unmap *unmap)
    405  if (unmap->iova & mask)
    407  if (!unmap->size || unmap->size & mask)
    418  * to unmap any range. Depending on the contiguousness of physical
    420  * or may not have worked. We only guaranteed unmap granularity
    425  * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
    426  * a zero sized unmap. Also, if an unmap request overlaps the first
    427  * address of a hugepage, the IOMMU will unmap th
    395  vfio_dma_do_unmap(struct vfio_iommu *iommu, struct vfio_iommu_type1_dma_unmap *unmap) argument
    949  struct vfio_iommu_type1_dma_unmap unmap; local
    [all...]
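vfio_iommu_type1.c is the kernel side of the VFIO_IOMMU_UNMAP_DMA ioctl; the iova and size in struct vfio_iommu_type1_dma_unmap must respect the unmap granularity, which is why the code above masks them before doing any work. A minimal userspace sketch of issuing that ioctl is shown below; the container file descriptor and the previously mapped range are assumed to have been set up earlier with VFIO_IOMMU_MAP_DMA.

#include <linux/vfio.h>
#include <string.h>
#include <sys/ioctl.h>

/* 'container' is an open /dev/vfio/vfio fd with an IOMMU already set; sketch only. */
static int sketch_unmap_range(int container, __u64 iova, __u64 size)
{
        struct vfio_iommu_type1_dma_unmap unmap;

        memset(&unmap, 0, sizeof(unmap));
        unmap.argsz = sizeof(unmap);
        unmap.iova = iova;      /* must be aligned to the IOMMU page size */
        unmap.size = size;      /* likewise; see the granularity comment above */

        return ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
}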
/drivers/gpio/
gpio-sodaville.c
    233  goto unmap;
    239  goto unmap;
    244  goto unmap;
    250  unmap:
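The gpio-sodaville.c hits (and the ambakmi.c, i2c-iop3xx.c and bcma driver_pci_host.c hits further down) are the usual probe-time error-unwinding idiom: every failure after an ioremap jumps to an unmap: label so the mapping is released exactly once. A generic hedged sketch of that shape follows; the register window and the sanity check are placeholders, not taken from any of the drivers listed here.

#include <linux/io.h>
#include <linux/ioport.h>

/* Sketch of a probe path that must undo its ioremap on any later failure. */
static int sketch_probe_region(struct resource *res)
{
        void __iomem *base;
        int ret;

        base = ioremap(res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        ret = -ENODEV;
        if (readl(base) == 0xffffffff)  /* placeholder sanity check */
                goto unmap;

        /* ... further setup; any failure below also takes the unmap path ... */

        return 0;

unmap:
        iounmap(base);
        return ret;
}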
/drivers/ntb/
ntb_transport.c
   1061  struct dmaengine_unmap_data *unmap; local
   1080  unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
   1081  if (!unmap)
   1084  unmap->len = len;
   1085  unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
   1087  if (dma_mapping_error(device->dev, unmap->addr[0]))
   1090  unmap->to_cnt = 1;
   1092  unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
   1094  if (dma_mapping_error(device->dev, unmap->addr[1]))
   1097  unmap
   1279  struct dmaengine_unmap_data *unmap; local
   [all...]
/drivers/lguest/
core.c
    331  goto unmap;
    346  unmap:
/drivers/gpu/drm/nouveau/
nouveau_nvif.c
    134  .unmap = nvkm_client_unmap,
/drivers/block/xen-blkback/
blkback.c
    268  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; local
    278  gnttab_set_unmap_op(&unmap[segs_to_unmap],
    288  ret = gnttab_unmap_refs(unmap, NULL, pages,
    304  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; local
    316  gnttab_set_unmap_op(&unmap[segs_to_unmap],
    324  ret = gnttab_unmap_refs(unmap, NULL, pages,
    333  ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
    664  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; local
    677  gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
    681  ret = gnttab_unmap_refs(unmap, NUL
    [all...]
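blkback.c batches grant unmaps: it fills an array of struct gnttab_unmap_grant_ref with gnttab_set_unmap_op() and flushes the array with gnttab_unmap_refs() whenever it fills up. The following is a hedged sketch of that batching loop following the shapes visible in the snippet; the persistent-grant lists the real code walks are reduced to plain arrays of pages and handles, and the batch size constant stands in for BLKIF_MAX_SEGMENTS_PER_REQUEST.

#include <linux/mm.h>
#include <xen/grant_table.h>

#define SKETCH_BATCH 16         /* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

/* Unmap previously granted pages in fixed-size batches; sketch only. */
static int sketch_unmap_grants(struct page **pages, grant_handle_t *handles,
                               unsigned int count)
{
        struct gnttab_unmap_grant_ref unmap[SKETCH_BATCH];
        unsigned int i, segs_to_unmap = 0;
        int ret;

        for (i = 0; i < count; i++) {
                /* The real driver uses its own vaddr() helper for this address. */
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    (unsigned long)page_address(pages[i]),
                                    GNTMAP_host_map, handles[i]);
                if (++segs_to_unmap == SKETCH_BATCH || i + 1 == count) {
                        ret = gnttab_unmap_refs(unmap, NULL,
                                                pages + i + 1 - segs_to_unmap,
                                                segs_to_unmap);
                        if (ret)
                                return ret;
                        segs_to_unmap = 0;
                }
        }
        return 0;
}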
/drivers/gpu/drm/cirrus/
cirrus_fbdev.c
     29  bool unmap = false; local
     83  unmap = true;
     91  if (unmap)
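cirrus_fbdev.c (and the mgag200_fb.c and ast_fb.c entries below) use a local bool unmap in the framebuffer dirty-update path: the object is only kmapped if it was not already mapped, and the flag remembers whether this function owns the temporary mapping and must kunmap it before returning. Below is a hedged sketch of that shape using the TTM kmap helpers; the function and its arguments are placeholders, not the exact driver code.

#include <drm/ttm/ttm_bo_api.h>

/* Sketch: update a TTM-backed framebuffer, mapping it only if nobody has yet. */
static int sketch_fb_dirty_update(struct ttm_buffer_object *bo,
                                  struct ttm_bo_kmap_obj *kmap)
{
        bool unmap = false;
        bool is_iomem;
        void *dst;
        int ret;

        if (!kmap->virtual) {
                /* Not mapped yet: create a temporary mapping this function owns. */
                ret = ttm_bo_kmap(bo, 0, bo->num_pages, kmap);
                if (ret)
                        return ret;
                unmap = true;
        }

        dst = ttm_kmap_obj_virtual(kmap, &is_iomem);
        /* ... copy the dirty rectangle into dst here ... */
        (void)dst;

        if (unmap)
                ttm_bo_kunmap(kmap);
        return 0;
}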
/drivers/gpu/drm/mgag200/
mgag200_fb.c
     31  bool unmap = false; local
     86  unmap = true;
     94  if (unmap)
/drivers/gpu/drm/nouveau/core/subdev/bar/
base.c
     77  bar->unmap(bar, &barobj->vma);
/drivers/gpu/drm/nouveau/core/subdev/vm/
nv04.c
     33  * VM map/unmap callbacks
    105  priv->base.unmap = nv04_vm_unmap;
/drivers/xen/xenbus/
xenbus_client.c
     66  int (*unmap)(struct xenbus_device *dev, void *vaddr); member in struct:xenbus_ring_ops
    564  * @vaddr: addr to unmap
    575  return ring_ops->unmap(dev, vaddr);
    661  * @vaddr: addr to unmap
    707  .unmap = xenbus_unmap_ring_vfree_pv,
    712  .unmap = xenbus_unmap_ring_vfree_hvm,
/drivers/input/serio/
ambakmi.c
    148  goto unmap;
    157  unmap:
/drivers/gpu/drm/ast/
ast_fb.c
     55  bool unmap = false; local
    110  unmap = true;
    118  if (unmap)
/drivers/i2c/busses/
i2c-iop3xx.c
    463  goto unmap;
    470  goto unmap;
    498  unmap:
/drivers/bcma/
driver_pci_host.c
    123  goto unmap;
    140  unmap:
    188  goto unmap;
    222  unmap: