Searched refs:prot (Results 1 - 25 of 57) sorted by relevance

/drivers/iommu/
fsl_pamu_domain.h
28 int prot; member in struct:dma_window
ipmmu-vmsa.c
559 static u64 ipmmu_page_prot(unsigned int prot, u64 type) argument
565 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
568 if (prot & IOMMU_CACHE)
571 if (prot & IOMMU_EXEC)
573 else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
582 size_t size, int prot)
584 pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
615 int prot)
617 pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SEC
580 ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, unsigned long iova, unsigned long pfn, size_t size, int prot) argument
613 ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, unsigned long iova, unsigned long pfn, int prot) argument
625 ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) argument
945 ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) argument
[all...]
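
The ipmmu_page_prot() hits above (and the arm_smmu_alloc_init_pte() hits further down) show the usual IOMMU-driver job for prot: translate the generic IOMMU_READ/WRITE/CACHE/EXEC flags into hardware page-table attribute bits. Below is a minimal user-space sketch of that shape; the IOMMU_* values match include/linux/iommu.h of this era, but the PTE_* bit positions are placeholders, not the real ARM VMSA encodings.

#include <stdint.h>
#include <stdio.h>

/* Generic prot flags, as in include/linux/iommu.h of this era. */
#define IOMMU_READ  (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2)
#define IOMMU_EXEC  (1 << 3)

/* Placeholder hardware attribute bits (NOT the real VMSA encodings). */
#define PTE_TYPE_PAGE (1ull << 1)
#define PTE_RDONLY    (1ull << 7)
#define PTE_CACHED    (1ull << 2)
#define PTE_XN        (1ull << 54)  /* execute-never */

/* Same shape as ipmmu_page_prot(): start from the descriptor type,
 * then fold the generic flags into hardware attributes. */
static uint64_t page_prot(unsigned int prot, uint64_t type)
{
        uint64_t pte = type | PTE_XN;           /* default: no exec */

        if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
                pte |= PTE_RDONLY;              /* read-only mapping */
        if (prot & IOMMU_CACHE)
                pte |= PTE_CACHED;
        if (prot & IOMMU_EXEC)
                pte &= ~PTE_XN;                 /* allow execution */
        else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
                pte &= ~PTE_TYPE_PAGE;          /* no access: faulting entry */

        return pte;
}

int main(void)
{
        printf("ro  pte = %#llx\n",
               (unsigned long long)page_prot(IOMMU_READ, PTE_TYPE_PAGE));
        printf("rwx pte = %#llx\n",
               (unsigned long long)page_prot(IOMMU_READ | IOMMU_WRITE |
                                             IOMMU_EXEC, PTE_TYPE_PAGE));
        return 0;
}

arm-smmu.c's variant below also takes a stage argument, since stage-2 tables encode permissions differently, but the flag tests follow the same idea.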
omap-iommu.c
570 static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) argument
580 *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
585 static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) argument
597 *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
602 static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) argument
610 *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
619 static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot) argument
635 *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
644 u32 prot; local
669 prot
1038 omap_iommu_map(struct iommu_domain *domain, unsigned long da, phys_addr_t pa, size_t bytes, int prot) argument
[all...]
arm-smmu.c
1281 unsigned long pfn, int prot, int stage)
1300 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
1303 if (prot & IOMMU_CACHE)
1308 if (prot & IOMMU_READ)
1310 if (prot & IOMMU_WRITE)
1312 if (prot & IOMMU_CACHE)
1319 if (prot & IOMMU_EXEC)
1321 else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
1385 phys_addr_t phys, int prot, in
1279 arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, int prot, int stage) argument
1383 arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys, int prot, int stage) argument
1417 arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys, int prot, int stage) argument
1450 arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) argument
1505 arm_smmu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) argument
[all...]
fsl_pamu_domain.c
130 sub_win_ptr[i].prot);
156 0, wnd->prot);
191 wnd->prot);
204 0, wnd->prot);
550 phys_addr_t paddr, u64 size, int prot)
559 if (prot & IOMMU_READ)
561 if (prot & IOMMU_WRITE)
603 wnd->prot = pamu_prot;
549 fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) argument
fsl_pamu.h
399 u32 subwin_cnt, int prot);
402 uint32_t snoopid, u32 stashid, int enable, int prot);
intel-iommu.c
1982 unsigned long nr_pages, int prot)
1992 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1995 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2001 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2011 pteval = page_to_phys(sg_page(sg)) | prot;
2090 int prot)
2092 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2097 int prot)
2099 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
3039 int prot local
1980 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, struct scatterlist *sg, unsigned long phys_pfn, unsigned long nr_pages, int prot) argument
2088 domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, struct scatterlist *sg, unsigned long nr_pages, int prot) argument
2095 domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, unsigned long phys_pfn, unsigned long nr_pages, int prot) argument
3326 int prot = 0; local
4322 int prot = 0; local
[all...]
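
__domain_mapping() adds the other half of the pattern: refuse a prot with neither read nor write, mask it down to the bits the hardware defines, then OR it into the PTE together with the page-frame address. A sketch of that compose step; the PTE_* values are assumptions shaped like DMA_PTE_*.

#include <stdint.h>
#include <stdio.h>

/* Assumed PTE permission bits, shaped like intel-iommu's DMA_PTE_*. */
#define PTE_READ  (1ull << 0)
#define PTE_WRITE (1ull << 1)
#define PTE_SNP   (1ull << 11)   /* snoop behavior */
#define PAGE_SHIFT_4K 12

/* Shape of __domain_mapping(): reject a no-access prot, drop undefined
 * bits, then combine the page frame number and prot into one PTE. */
static int make_pte(uint64_t phys_pfn, uint64_t prot, uint64_t *pteval)
{
        if (!(prot & (PTE_READ | PTE_WRITE)))
                return -1;

        prot &= PTE_READ | PTE_WRITE | PTE_SNP;
        *pteval = (phys_pfn << PAGE_SHIFT_4K) | prot;
        return 0;
}

int main(void)
{
        uint64_t pte;

        if (make_pte(0x1234, PTE_READ | PTE_WRITE, &pte) == 0)
                printf("pte = %#llx\n", (unsigned long long)pte);
        return 0;
}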
/drivers/vfio/
vfio_iommu_type1.c
68 int prot; /* IOMMU_CACHE */ member in struct:vfio_domain
76 int prot; /* IOMMU_READ/WRITE */ member in struct:vfio_dma
218 static int put_pfn(unsigned long pfn, int prot) argument
222 if (prot & IOMMU_WRITE)
230 static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) argument
236 if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
262 int prot, unsigned long *pfn_base)
271 ret = vaddr_get_pfn(vaddr, prot, pfn_base);
279 put_pfn(*pfn_base, prot);
294 ret = vaddr_get_pfn(vaddr, prot,
261 vfio_pin_pages(unsigned long vaddr, long npage, int prot, unsigned long *pfn_base) argument
316 vfio_unpin_pages(unsigned long pfn, long npage, int prot, bool do_accounting) argument
480 map_try_harder(struct vfio_domain *domain, dma_addr_t iova, unsigned long pfn, long npage, int prot) argument
500 vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, unsigned long pfn, long npage, int prot) argument
532 int ret = 0, prot = 0; local
[all...]
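
The map_try_harder() hit is a useful fallback shape: vfio first offers the IOMMU one large physically contiguous mapping and, only if that fails, remaps the same range page by page, unwinding on error. A self-contained sketch with a stand-in do_map() in place of iommu_map():

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096ul

/* Stand-in for iommu_map(); pretend mappings over 8 pages fail, so the
 * fallback path below is exercised. */
static int do_map(unsigned long iova, unsigned long paddr, size_t size)
{
        (void)iova; (void)paddr;
        return size > 8 * PAGE_SIZE ? -1 : 0;
}

static void do_unmap(unsigned long iova, size_t size)
{
        (void)iova; (void)size;
}

/* Shape of vfio_iommu_map() + map_try_harder(): try one big mapping,
 * fall back to page-at-a-time, unwind on a mid-range failure. */
static int map_range(unsigned long iova, unsigned long paddr, long npage)
{
        long i;

        if (do_map(iova, paddr, npage * PAGE_SIZE) == 0)
                return 0;

        for (i = 0; i < npage; i++) {
                if (do_map(iova + i * PAGE_SIZE, paddr + i * PAGE_SIZE,
                           PAGE_SIZE)) {
                        do_unmap(iova, i * PAGE_SIZE);  /* roll back */
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        printf("map_range: %d\n", map_range(0x100000, 0x200000, 16));
        return 0;
}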
/drivers/gpu/drm/ttm/
ttm_bo_util.c
252 pgprot_t prot)
263 dst = kmap_atomic_prot(d, prot);
265 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
266 dst = vmap(&d, 1, 0, prot);
278 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
289 pgprot_t prot)
299 src = kmap_atomic_prot(s, prot);
301 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
302 src = vmap(&s, 1, 0, prot);
314 if (pgprot_val(prot) !
250 ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, unsigned long page, pgprot_t prot) argument
287 ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, unsigned long page, pgprot_t prot) argument
384 pgprot_t prot = ttm_io_prot(old_mem->placement, local
389 pgprot_t prot = ttm_io_prot(new_mem->placement, local
533 struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot; local
[all...]
/drivers/gpu/drm/msm/
msm_mmu.h
27 unsigned len, int prot);
msm_iommu.c
47 struct sg_table *sgt, unsigned len, int prot)
65 ret = iommu_map(domain, da, pa, bytes, prot);
46 msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, unsigned len, int prot) argument
/drivers/net/ethernet/mellanox/mlx4/
mcg.c
159 u32 prot; local
207 prot = be32_to_cpu(mgm->members_count) >> 30;
222 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
429 u32 prot; local
475 prot = be32_to_cpu(mgm->members_count) >> 30;
506 (prot << 30));
696 u8 *gid, enum mlx4_protocol prot,
705 u8 op_mod = (prot == MLX4_PROT_ETH) ?
740 be32_to_cpu(mgm->members_count) >> 30 == prot)
1083 int block_mcast_loopback, enum mlx4_protocol prot,
695 find_entry(struct mlx4_dev *dev, u8 port, u8 *gid, enum mlx4_protocol prot, struct mlx4_cmd_mailbox *mgm_mailbox, int *prev, int *index) argument
1082 mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer) argument
1191 mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot, enum mlx4_steer_type steer) argument
1309 mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], u8 attach, u8 block_loopback, enum mlx4_protocol prot) argument
1338 mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], u8 port, int block_mcast_loopback, enum mlx4_protocol prot, u64 *reg_id) argument
1378 mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], u8 port, int block_mcast_loopback, enum mlx4_protocol prot, u64 *reg_id) argument
1408 mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot, u64 reg_id) argument
1493 mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot) argument
1509 mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot) argument
[all...]
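
Here prot is not a struct member at all: the hits at 207 and 222 show a 2-bit protocol tag packed into the top of the big-endian members_count word. A sketch of that pack/unpack using htonl()/ntohl(); the 30-bit count mask is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl/ntohl for the be32 conversion */

/* Pack a count and a 2-bit protocol into one be32 word, mirroring
 * mgm->members_count = cpu_to_be32(members_count | (prot << 30)). */
static uint32_t pack_members(uint32_t count, uint32_t prot)
{
        return htonl((count & 0x3fffffff) | (prot << 30));
}

static uint32_t unpack_prot(uint32_t be_word)
{
        return ntohl(be_word) >> 30;            /* top two bits */
}

static uint32_t unpack_count(uint32_t be_word)
{
        return ntohl(be_word) & 0x3fffffff;     /* low bits */
}

int main(void)
{
        uint32_t w = pack_members(5, 2);

        printf("prot=%u count=%u\n", unpack_prot(w), unpack_count(w));
        return 0;
}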
/drivers/base/
dma-mapping.c
279 unsigned long vm_flags, pgprot_t prot,
290 if (map_vm_area(area, prot, pages)) {
305 pgprot_t prot, const void *caller)
319 ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
278 dma_common_pages_remap(struct page **pages, size_t size, unsigned long vm_flags, pgprot_t prot, const void *caller) argument
303 dma_common_contiguous_remap(struct page *page, size_t size, unsigned long vm_flags, pgprot_t prot, const void *caller) argument
/drivers/isdn/hisax/
l3dss1.c
55 retval = p->prot.dss1.last_invoke_id + 1; /* try new id */
56 while ((i) && (p->prot.dss1.invoke_used[retval >> 3] == 0xFF)) {
57 p->prot.dss1.last_invoke_id = (retval & 0xF8) + 8;
61 while (p->prot.dss1.invoke_used[retval >> 3] & (1 << (retval & 7)))
65 p->prot.dss1.last_invoke_id = retval;
66 p->prot.dss1.invoke_used[retval >> 3] |= (1 << (retval & 7));
78 p->prot.dss1.invoke_used[id >> 3] &= ~(1 << (id & 7));
92 proc->prot.dss1.invoke_id = 0;
93 proc->prot.dss1.remote_operation = 0;
94 proc->prot
[all...]
l3ni1.c
54 retval = p->prot.ni1.last_invoke_id + 1; /* try new id */
55 while ((i) && (p->prot.ni1.invoke_used[retval >> 3] == 0xFF)) {
56 p->prot.ni1.last_invoke_id = (retval & 0xF8) + 8;
60 while (p->prot.ni1.invoke_used[retval >> 3] & (1 << (retval & 7)))
64 p->prot.ni1.last_invoke_id = retval;
65 p->prot.ni1.invoke_used[retval >> 3] |= (1 << (retval & 7));
77 p->prot.ni1.invoke_used[id >> 3] &= ~(1 << (id & 7));
91 proc->prot.ni1.invoke_id = 0;
92 proc->prot.ni1.remote_operation = 0;
93 proc->prot
[all...]
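
l3dss1.c and l3ni1.c share the same invoke-id allocator: a byte-array bitmap where id >> 3 selects the byte and 1 << (id & 7) the bit, with completely full bytes (0xFF) skipped eight ids at a time. A standalone model of that allocator over a flat 0-255 id space (the real drivers reserve some values):

#include <stdint.h>
#include <stdio.h>

#define NUM_IDS 256

static uint8_t invoke_used[NUM_IDS / 8];
static unsigned last_invoke_id;

/* Shape of the dss1/ni1 allocator: scan forward from the last id,
 * skipping bytes that are completely full, then claim a free bit. */
static int new_invoke_id(void)
{
        unsigned retval = (last_invoke_id + 1) & 0xff;
        int tries = NUM_IDS;

        /* Skip 8 ids at a time while the whole byte is in use. */
        while (tries > 0 && invoke_used[retval >> 3] == 0xFF) {
                retval = ((retval & 0xF8) + 8) & 0xff;
                tries -= 8;
        }
        if (tries <= 0)
                return -1;                      /* all ids busy */

        while (invoke_used[retval >> 3] & (1 << (retval & 7)))
                retval = (retval + 1) & 0xff;

        last_invoke_id = retval;
        invoke_used[retval >> 3] |= 1 << (retval & 7);
        return (int)retval;
}

static void free_invoke_id(unsigned id)
{
        invoke_used[id >> 3] &= ~(1 << (id & 7));
}

int main(void)
{
        int a = new_invoke_id(), b = new_invoke_id();

        printf("allocated ids %d and %d\n", a, b);
        free_invoke_id(a);                      /* id a may be reused later */
        return 0;
}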
/drivers/infiniband/ulp/iser/
iser_initiator.c
46 * is stored in task->prot[ISER_DIR_IN].data_len
66 struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
97 * is stored at task->prot[ISER_DIR_OUT].data_len
121 struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
401 prot_buf = &iser_task->prot[ISER_DIR_IN];
404 prot_buf = &iser_task->prot[ISER_DIR_OUT];
662 iser_task->prot[ISER_DIR_IN].data_len = 0;
663 iser_task->prot[ISER_DIR_OUT].data_len = 0;
700 &iser_task->prot[ISER_DIR_IN],
708 &iser_task->prot[ISER_DIR_OU
[all...]
/drivers/staging/android/
ashmem.c
354 static inline vm_flags_t calc_vm_may_flags(unsigned long prot) argument
356 return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
357 _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
358 _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
478 static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) argument
485 if (unlikely((asma->prot_mask & prot) != prot)) {
491 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
492 prot |= PROT_EXEC;
494 asma->prot_mask = prot;
[all...]
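
calc_vm_may_flags() leans on _calc_vm_trans() from linux/mman.h, which moves a flag from one bit position to another with a multiply or divide instead of a branch. A user-space sketch of the trick; the bit values here are illustrative, not the kernel's.

#include <stdio.h>

/* Illustrative bit values; the kernel's PROT_* / VM_MAY* may differ. */
#define PROT_READ   0x1
#define PROT_WRITE  0x2
#define PROT_EXEC   0x4
#define VM_MAYREAD  0x10
#define VM_MAYWRITE 0x20
#define VM_MAYEXEC  0x40

/* Branch-free bit relocation, as _calc_vm_trans() in linux/mman.h:
 * scale the masked bit up or down to the target position. */
#define calc_vm_trans(x, bit1, bit2) \
        ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
                          : ((x) & (bit1)) / ((bit1) / (bit2)))

static unsigned long calc_vm_may_flags(unsigned long prot)
{
        return calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
               calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
               calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

int main(void)
{
        printf("%#lx\n", calc_vm_may_flags(PROT_READ | PROT_EXEC));
        return 0;
}

Note also the hits at 491-492: set_prot_mask() adds PROT_EXEC to PROT_READ requests when the task's personality sets READ_IMPLIES_EXEC.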
/drivers/char/agp/
compat_ioctl.h
56 * The "prot" down below needs still a "sleep" flag somehow ...
61 compat_int_t prot; /* prot flags for mmap */ member in struct:agp_segment32
compat_ioctl.c
121 ksegment[seg].prot = usegment[seg].prot;
frontend.c
117 (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
155 static pgprot_t agp_convert_mmap_flags(int prot) argument
159 prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
181 seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
/drivers/ata/
sata_sil24.c
41 __le16 prot; member in struct:sil24_prb
801 u8 prot = qc->tf.protocol; local
822 int is_excl = (ata_is_atapi(prot) ||
857 u16 prot = 0; local
860 prot |= PRB_PROT_NCQ;
862 prot |= PRB_PROT_WRITE;
864 prot |= PRB_PROT_READ;
865 prb->prot = cpu_to_le16(prot);
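
The hits at 857-865 accumulate NCQ and direction flags into a host-endian u16 and convert once at the end, because struct sil24_prb (line 41) is a little-endian hardware structure. The same shape, sketched with an explicit cpu_to_le16() helper; the PRB_PROT_* values are assumptions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed PRB protocol flag values, for illustration only. */
#define PRB_PROT_NCQ   (1 << 0)
#define PRB_PROT_READ  (1 << 1)
#define PRB_PROT_WRITE (1 << 2)

/* Minimal cpu_to_le16(): the result's in-memory layout is low byte
 * first on any host. */
static uint16_t cpu_to_le16(uint16_t v)
{
        uint8_t b[2] = { (uint8_t)v, (uint8_t)(v >> 8) };
        uint16_t out;

        memcpy(&out, b, sizeof(out));
        return out;
}

/* Accumulate flags in host order, convert once when filling the PRB. */
static uint16_t build_prb_prot(int is_ncq, int is_write)
{
        uint16_t prot = 0;

        if (is_ncq)
                prot |= PRB_PROT_NCQ;
        if (is_write)
                prot |= PRB_PROT_WRITE;
        else
                prot |= PRB_PROT_READ;

        return cpu_to_le16(prot);       /* value to store into prb->prot */
}

int main(void)
{
        printf("prb prot = %#x\n", build_prb_prot(1, 0));
        return 0;
}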
/drivers/infiniband/ulp/isert/
ib_isert.h
94 struct isert_data_buf prot; member in struct:isert_rdma_wr
/drivers/s390/net/
qeth_core.h
204 #define qeth_is_ipafunc_supported(c, prot, f) \
205 ((prot == QETH_PROT_IPV6) ? \
207 #define qeth_is_ipafunc_enabled(c, prot, f) \
208 ((prot == QETH_PROT_IPV6) ? \
984 int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
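
The qeth_is_ipafunc_supported()/qeth_is_ipafunc_enabled() macros pick between per-card IPv4 and IPv6 feature masks based on prot. A sketch of the same protocol-keyed capability check; all names and bit values here are made up for illustration.

#include <stdio.h>

enum prot_version { PROT_IPV4, PROT_IPV6 };

/* Hypothetical feature bits and card state. */
#define IPA_FUNC_ARP  (1u << 0)
#define IPA_FUNC_VLAN (1u << 1)

struct card {
        unsigned int ipa4_supported;
        unsigned int ipa6_supported;
};

/* Shape of qeth_is_ipafunc_supported(): prot selects which mask to test. */
static int ipafunc_supported(const struct card *c, enum prot_version prot,
                             unsigned int func)
{
        unsigned int mask = (prot == PROT_IPV6) ? c->ipa6_supported
                                                : c->ipa4_supported;
        return (mask & func) == func;
}

int main(void)
{
        struct card c = { .ipa4_supported = IPA_FUNC_ARP, .ipa6_supported = 0 };

        printf("ARP over IPv4: %d\n",
               ipafunc_supported(&c, PROT_IPV4, IPA_FUNC_ARP));
        return 0;
}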
qeth_l3_main.c
310 enum qeth_prot_versions prot)
319 addr->proto = prot;
611 enum qeth_routing_types type, enum qeth_prot_versions prot)
618 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
627 enum qeth_routing_types *type, enum qeth_prot_versions prot)
646 if (qeth_is_ipafunc_supported(card, prot,
1044 __u16 len, enum qeth_prot_versions prot)
1050 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
1862 __be16 prot; local
1868 prot
309 qeth_l3_get_addr_buffer( enum qeth_prot_versions prot) argument
610 qeth_l3_send_setrouting(struct qeth_card *card, enum qeth_routing_types type, enum qeth_prot_versions prot) argument
626 qeth_l3_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type, enum qeth_prot_versions prot) argument
1042 qeth_l3_get_setassparms_cmd( struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code, __u16 len, enum qeth_prot_versions prot) argument
2325 arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot) argument
2430 qeth_l3_query_arp_cache_info(struct qeth_card *card, enum qeth_prot_versions prot, struct qeth_arp_query_info *qinfo) argument
[all...]
/drivers/infiniband/hw/cxgb4/
t4.h
273 static inline pgprot_t t4_pgprot_wc(pgprot_t prot) argument
276 return pgprot_writecombine(prot);
278 return pgprot_noncached(prot);
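
t4_pgprot_wc() is a compile-time capability fallback: where write-combining mappings exist it applies pgprot_writecombine(), otherwise it degrades to pgprot_noncached(). The same pattern in a self-contained form, with stand-in attribute setters and an architecture test that is only an example.

#include <stdio.h>

typedef unsigned long my_pgprot_t;      /* stand-in for pgprot_t */

#define ATTR_WC 0x1
#define ATTR_NC 0x2

static my_pgprot_t set_writecombine(my_pgprot_t p) { return p | ATTR_WC; }
static my_pgprot_t set_noncached(my_pgprot_t p)    { return p | ATTR_NC; }

/* Shape of t4_pgprot_wc(): prefer write-combining where the target
 * supports it, fall back to uncached otherwise. */
static my_pgprot_t pgprot_wc(my_pgprot_t prot)
{
#if defined(__x86_64__) || defined(__i386__)
        return set_writecombine(prot);
#else
        return set_noncached(prot);
#endif
}

int main(void)
{
        printf("prot = %#lx\n", pgprot_wc(0));
        return 0;
}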

Completed in 439 milliseconds
