Searched refs:pfn (Results 1 - 25 of 70) sorted by relevance


/drivers/xen/
tmem.c
102 u32 index, unsigned long pfn)
104 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
111 u32 index, unsigned long pfn)
113 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
154 unsigned long pfn = page_to_pfn(page); local
161 (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
169 unsigned long pfn = page_to_pfn(page); local
177 ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
101 xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, u32 index, unsigned long pfn) argument
110 xen_tmem_get_page(u32 pool_id, struct tmem_oid oid, u32 index, unsigned long pfn) argument
277 unsigned long pfn = page_to_pfn(page); local
303 unsigned long pfn = page_to_pfn(page); local
[all...]
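
The tmem hooks above translate a Linux pfn into a machine frame number only on paravirtualized Xen; HVM guests are auto-translated, so the pfn is already the handle the hypervisor expects. A minimal sketch of that selection (the helper name is illustrative):

#include <xen/xen.h>        /* xen_pv_domain() */
#include <asm/xen/page.h>   /* pfn_to_mfn() */

static unsigned long tmem_gmfn_for(unsigned long pfn)
{
	/* PV: translate pseudo-physical to machine frame; HVM: pass through */
	return xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
}
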
balloon.c
317 unsigned long pfn, i; local
357 pfn = page_to_pfn(page);
359 phys_to_machine_mapping_valid(pfn));
361 set_phys_to_machine(pfn, frame_list[i]);
367 (unsigned long)__va(pfn << PAGE_SHIFT),
387 unsigned long pfn, i; local
415 pfn = page_to_pfn(page);
416 frame_list[i] = pfn_to_mfn(pfn);
422 (unsigned long)__va(pfn << PAGE_SHIFT),
435 pfn
565 unsigned long pfn, extra_pfn_end; local
[all...]
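
When balloon.c hands pages back to the guest, the hypervisor returns machine frames in frame_list[], and each page's phys-to-machine entry must be rewired before the page is used again. A sketch of that step for the PV case shown above (the helper name is hypothetical):

#include <asm/xen/page.h>   /* set_phys_to_machine(), phys_to_machine_mapping_valid() */

static void balloon_repopulate_one(struct page *page, unsigned long frame)
{
	unsigned long pfn = page_to_pfn(page);

	/* the page was ballooned out, so its p2m slot must be invalid */
	BUG_ON(phys_to_machine_mapping_valid(pfn));
	set_phys_to_machine(pfn, frame);
}
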
swiotlb-xen.c
72 static int check_pages_physically_contiguous(unsigned long pfn, argument
80 next_mfn = pfn_to_mfn(pfn);
84 if (pfn_to_mfn(++pfn) != ++next_mfn)
92 unsigned long pfn = PFN_DOWN(p); local
97 if (check_pages_physically_contiguous(pfn, offset, size))
105 unsigned long pfn = mfn_to_local_pfn(mfn); local
112 if (pfn_valid(pfn)) {
113 paddr = PFN_PHYS(pfn);
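
swiotlb-xen's contiguity check matters because a buffer that is contiguous in pseudo-physical space may still be scattered in machine memory. A minimal sketch of the same test, assuming pfn_to_mfn() as above:

static int pfns_machine_contiguous(unsigned long pfn, unsigned int nr_pages)
{
	unsigned long mfn = pfn_to_mfn(pfn);
	unsigned int i;

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_mfn(pfn + i) != mfn + i)
			return 0;   /* mfn run broken: not DMA-contiguous */
	return 1;
}
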
/drivers/gpu/drm/vmwgfx/
vmwgfx_gmr.c
117 unsigned long pfn; local
147 pfn = page_to_pfn(*pages);
149 if (pfn != prev_pfn + 1) {
155 (++desc_virtual)->ppn = cpu_to_le32(pfn);
162 prev_pfn = pfn;
223 unsigned long pfn; local
227 pfn = page_to_pfn(*pages++);
228 if (prev_pfn + 1 != pfn)
230 prev_pfn = pfn;
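
Both vmwgfx paths above walk a page array looking for breaks in pfn contiguity; the number of runs decides how many GMR descriptors are needed. A sketch of that counting (hypothetical helper, not the driver's API):

#include <linux/mm.h>   /* page_to_pfn() */

static unsigned long count_pfn_runs(struct page **pages, unsigned long num_pages)
{
	unsigned long i, runs = 0, prev_pfn = 0;

	for (i = 0; i < num_pages; i++) {
		unsigned long pfn = page_to_pfn(pages[i]);

		if (i == 0 || pfn != prev_pfn + 1)
			runs++;   /* a new physically contiguous run starts */
		prev_pfn = pfn;
	}
	return runs;
}
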
/drivers/base/
node.c
359 static int get_nid_for_pfn(unsigned long pfn) argument
363 if (!pfn_valid_within(pfn))
365 page = pfn_to_page(pfn);
368 return pfn_to_nid(pfn);
375 unsigned long pfn, sect_start_pfn, sect_end_pfn; local
385 for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
388 page_nid = get_nid_for_pfn(pfn);
412 unsigned long pfn, sect_start_pfn, sect_end_pfn; local
447 unsigned long pfn; local
[all...]
memory.c
151 unsigned long i, pfn; local
157 pfn = section_nr_to_pfn(mem->start_section_nr + i);
158 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
217 unsigned long pfn = start_pfn; local
224 for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
225 if (WARN_ON_ONCE(!pfn_valid(pfn)))
227 page = pfn_to_page(pfn);
235 pfn_to_section_nr(pfn), j);
444 u64 pfn; local
447 if (strict_strtoull(buf, 0, &pfn) < 0)
463 u64 pfn; local
[all...]
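
memory.c converts between memory-block/section numbers and pfns: each section starts at section_nr_to_pfn() and spans PAGES_PER_SECTION frames. A sketch of the iteration pattern visible above:

pfn = section_nr_to_pfn(mem->start_section_nr);
for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
	if (!pfn_valid(pfn))
		continue;               /* hole in the memory map */
	page = pfn_to_page(pfn);        /* first page of this section */
	/* ... per-section work ... */
}
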
/drivers/infiniband/hw/mthca/
mthca_uar.c
44 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
/drivers/gpu/ion/
ion_system_mapper.c
73 unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv)); local
74 ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
/drivers/edac/
cell_edac.c
37 unsigned long address, pfn, offset, syndrome; local
46 pfn = address >> PAGE_SHIFT;
51 edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
59 unsigned long address, pfn, offset; local
68 pfn = address >> PAGE_SHIFT;
72 edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
i3000_edac.c
50 * unsigned long pfn and offset from hardware regs which are u8/u32.
237 unsigned long pfn, offset; local
252 pfn = deap_pfn(info->edeap, info->deap);
256 row = edac_mc_find_csrow_by_page(mci, pfn);
259 edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
261 edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
/drivers/misc/
vmw_balloon.c
317 static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, argument
323 pfn32 = (u32)pfn;
324 if (pfn32 != pfn)
329 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
333 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
342 static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) argument
347 pfn32 = (u32)pfn;
348 if (pfn32 != pfn)
353 status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
357 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
[all...]
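
The VMware balloon protocol carries pfns as 32-bit values, so the driver above refuses any pfn that would be silently truncated. A one-line sketch of that guard:

static bool pfn_fits_balloon_protocol(unsigned long pfn)
{
	return (u32)pfn == pfn;   /* false above the 16 TB boundary with 4 KiB pages */
}
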
/drivers/iommu/
iova.c
70 /* only cache if it's below 32bit pfn */
240 * find_iova - finds an iova for a given pfn
242 * pfn - page frame number
244 * given domain which matches the given pfn.
246 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) argument
257 /* If pfn falls within iova's range, return iova */
258 if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
269 if (pfn < iova->pfn_lo)
271 else if (pfn > iova->pfn_hi)
305 free_iova(struct iova_domain *iovad, unsigned long pfn) argument
[all...]
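
find_iova() above is a classic red-black-tree range search: every node covers [pfn_lo, pfn_hi], so the walk descends left or right until the pfn falls inside a node. A sketch, assuming the caller holds the domain's rbtree lock as find_iova() requires:

#include <linux/rbtree.h>

static struct iova *iova_lookup(struct rb_node *node, unsigned long pfn)
{
	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;   /* pfn_lo <= pfn <= pfn_hi */
	}
	return NULL;   /* no iova covers this pfn */
}
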
tegra-smmu.c
174 #define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr))
518 dma_addr_t iova, unsigned long pfn)
523 page = pfn_to_page(pfn);
529 vaddr[1] = pfn << PAGE_SHIFT;
534 unsigned long addr, unsigned long pfn)
608 unsigned long pfn)
621 *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
626 put_signature(as, iova, pfn);
633 unsigned long pfn; local
517 put_signature(struct smmu_as *as, dma_addr_t iova, unsigned long pfn) argument
533 put_signature(struct smmu_as *as, unsigned long addr, unsigned long pfn) argument
607 __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova, unsigned long pfn) argument
668 unsigned long pfn; local
[all...]
/drivers/char/
mem.c
28 #include <linux/pfn.h>
54 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) argument
62 static inline int range_is_allowed(unsigned long pfn, unsigned long size) argument
64 u64 from = ((u64)pfn) << PAGE_SHIFT;
69 if (!devmem_is_allowed(pfn)) {
76 pfn++;
81 static inline int range_is_allowed(unsigned long pfn, unsigned long size) argument
220 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
261 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, argument
265 unsigned long offset = pfn << PAGE_SHIFT;
219 phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) argument
344 unsigned long pfn; local
371 unsigned long pfn, offset; local
[all...]
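
range_is_allowed() above converts the pfn/size pair to a byte range and asks the architecture's devmem_is_allowed() hook one page at a time; a single forbidden page rejects the whole /dev/mem mapping. A compact sketch:

static int range_allowed_sketch(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;

	while (from < to) {
		if (!devmem_is_allowed(pfn))
			return 0;   /* one bad page vetoes the mapping */
		from += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
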
mspec.c
203 unsigned long pfn; local
229 pfn = paddr >> PAGE_SHIFT;
236 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); local
/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c
635 static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add) argument
648 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
678 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages) argument
683 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
688 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) argument
693 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
698 static int ehea_is_hugepage(unsigned long pfn) argument
702 if (pfn & EHEA_HUGEPAGE_PFN_MASK)
705 page_order = compound_order(pfn_to_page(pfn));
716 unsigned long pfn, start_pfn; local
[all...]
/drivers/gpu/drm/gma500/
gem.c
199 unsigned long pfn; local
232 pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
234 pfn = page_to_pfn(r->pages[page_offset]);
235 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
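
The gma500 fault handler above picks the pfn from one of two sources, stolen graphics memory or an ordinary backing page, then installs it directly into the faulting VMA. A sketch of that branch (use_stolen and stolen_base are illustrative names, not the driver's):

pfn = use_stolen ? (stolen_base + r->offset) >> PAGE_SHIFT
		 : page_to_pfn(r->pages[page_offset]);
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
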
/drivers/gpu/drm/exynos/
exynos_drm_gem.c
149 unsigned long pfn; local
155 pfn = page_to_pfn(buf->pages[page_offset++]);
157 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
159 return vm_insert_mixed(vma, f_vaddr, pfn);
489 unsigned long pfn, vm_size, usize, uaddr = vma->vm_start; local
534 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
537 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
539 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
541 DRM_ERROR("failed to remap pfn range\n");
[all...]
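
exynos maps a physically contiguous DMA buffer into userspace in one shot: the dma_addr is shifted down to a pfn and the whole range is remapped at once. A sketch of that mmap path, assuming write-combined access is wanted:

unsigned long pfn = (unsigned long)buf->dma_addr >> PAGE_SHIFT;

vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (remap_pfn_range(vma, vma->vm_start, pfn,
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
	return -EAGAIN;   /* mapping failed; let the caller retry */
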
/drivers/hv/
channel.c
250 unsigned long long pfn; local
260 pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
289 gpadl_header->range[0].pfn_array[i] = pfn+i;
342 gpadl_body->pfn[i] = pfn + pfnsum + i;
368 gpadl_header->range[0].pfn_array[i] = pfn+i;
641 desc.range[i].pfn = pagebuffers[i].pfn;
/drivers/misc/sgi-gru/
gruhandles.c
175 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
191 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
/drivers/staging/omapdrm/
omap_gem.c
325 unsigned long pfn; local
333 pfn = page_to_pfn(omap_obj->pages[pgoff]);
336 pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
339 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
340 pfn, pfn << PAGE_SHIFT);
342 return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
353 unsigned long pfn; local
429 pfn = entry->paddr >> PAGE_SHIFT;
431 VERB("Inserting %p pfn %lx, pa %lx",
[all...]
/drivers/virtio/
virtio_balloon.c
77 unsigned long pfn = page_to_pfn(page); local
80 /* Convert pfn from Linux page size to balloon page size. */
81 return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
84 static struct page *balloon_pfn_to_page(u32 pfn) argument
86 BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
87 return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
122 * Note that the first pfn points at start of the page. */
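
virtio_balloon speaks a fixed 4 KiB frame size, so on kernels with larger pages one Linux pfn expands to VIRTIO_BALLOON_PAGES_PER_PAGE protocol pfns, and the reverse conversion must divide evenly, which the BUG_ON above enforces. A sketch of the forward direction:

static u32 page_to_balloon_pfn_sketch(struct page *page)
{
	/* scale a native pfn into 4 KiB balloon-protocol units */
	return page_to_pfn(page) * VIRTIO_BALLOON_PAGES_PER_PAGE;
}
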
/drivers/acpi/
osl.c
329 #define should_use_kmap(pfn) page_is_ram(pfn)
332 #define should_use_kmap(pfn) 0
337 unsigned long pfn; local
339 pfn = pg_off >> PAGE_SHIFT;
340 if (should_use_kmap(pfn)) {
343 return (void __iomem __force *)kmap(pfn_to_page(pfn));
350 unsigned long pfn; local
352 pfn = pg_off >> PAGE_SHIFT;
353 if (should_use_kmap(pfn))
[all...]
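
ACPI's osl.c chooses the mapping primitive by pfn type: RAM pages cannot be safely ioremap()ed on every architecture, so they go through kmap() instead, while everything else (MMIO, reserved firmware regions) is ioremap()ed. A sketch of that decision, mirroring the code above:

pfn = pg_off >> PAGE_SHIFT;
if (should_use_kmap(pfn))
	return (void __iomem __force *)kmap(pfn_to_page(pfn));   /* RAM */
else
	return ioremap(pg_off, pg_sz);                           /* MMIO etc. */
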
/drivers/gpu/drm/radeon/
radeon_gart.c
522 unsigned pfn)
529 addr += pfn * RADEON_GPU_PAGE_SIZE;
535 addr += pfn * RADEON_GPU_PAGE_SIZE;
540 addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
556 uint64_t addr = 0, pfn; local
584 pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
590 rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
520 radeon_vm_get_addr(struct radeon_device *rdev, struct ttm_mem_reg *mem, unsigned pfn) argument
/drivers/lguest/
lguest_device.c
240 hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
282 (unsigned long)lvq->config.pfn << PAGE_SHIFT);
284 lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT,

Completed in 664 milliseconds
