Searched defs:address (Results 151 - 175 of 329) sorted by relevance

/arch/sparc/mm/
hugetlbpage.c
137 /* We must align the address, because our caller will run
219 unsigned long address, int write)
234 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, argument
218 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) argument
tlb.c
196 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, argument
203 set_pmd_at(vma->vm_mm, address, pmdp, entry);
204 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
/arch/tile/include/asm/
pgalloc.h
71 extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
76 unsigned long address)
78 return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
89 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) argument
91 return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
101 unsigned long address, int order);
103 unsigned long address)
105 __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
144 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) argument
146 struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDE
75 pte_alloc_one(struct mm_struct *mm, unsigned long address) argument
102 __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, unsigned long address) argument
155 __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long address) argument
[all...]
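
The tile pgalloc.h excerpt above builds pte_alloc_one_kernel() by converting the struct page returned from pte_alloc_one() into a kernel virtual address. A minimal sketch of that conversion, assuming an architecture that provides pfn_to_kaddr() (as tile and x86 do); for lowmem pages this amounts to page_address():

#include <linux/mm.h>
#include <asm/page.h>	/* pfn_to_kaddr() is arch-provided */

/* Sketch only: map a page-table page to its kernel virtual address. */
static void *pte_page_kaddr(struct page *pte_page)
{
	return pfn_to_kaddr(page_to_pfn(pte_page));
}
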
/arch/um/kernel/
trap.c
23 int handle_page_fault(unsigned long address, unsigned long ip, argument
48 vma = find_vma(mm, address);
51 else if (vma->vm_start <= address)
55 else if (is_user && !ARCH_IS_STACKGROW(address))
57 else if (expand_stack(vma, address))
75 fault = handle_mm_fault(mm, vma, address, flags);
102 pgd = pgd_offset(mm, address);
103 pud = pud_offset(pgd, address);
104 pmd = pmd_offset(pud, address);
105 pte = pte_offset_kernel(pmd, address);
207 unsigned long address = FAULT_ADDRESS(fi); local
[all...]
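
The trap.c excerpt shows the usual fault path (find_vma(), optional stack expansion, handle_mm_fault()) followed by a software walk of the page tables. A minimal sketch of that walk, assuming the pre-p4d helpers visible in the excerpt (pgd_offset()/pud_offset()/pmd_offset()/pte_offset_kernel()); the wrapper name is hypothetical:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Sketch: resolve the kernel-visible pte for 'address', or NULL if any
 * intermediate level is not present. Mirrors the walk in the excerpt. */
static pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}
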
/arch/x86/include/asm/
pgalloc.h
57 unsigned long address)
104 unsigned long address)
140 unsigned long address)
56 __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, unsigned long address) argument
103 __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, unsigned long address) argument
139 __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, unsigned long address) argument
/arch/x86/kernel/
amd_nb.c
140 u32 address; local
151 address = MSR_FAM10H_MMIO_CONF_BASE;
152 rdmsrl(address, msr);
mmconf-fam10h_64.c
60 u32 address; local
97 address = MSR_K8_SYSCFG;
98 rdmsrl(address, val);
105 address = MSR_K8_TOP_MEM2;
106 rdmsrl(address, val);
172 u32 address; local
177 address = MSR_FAM10H_MMIO_CONF_BASE;
178 rdmsrl(address, val);
213 wrmsrl(address, val);
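
Both x86 excerpts above follow the same MSR read-modify-write pattern: pick the MSR number, rdmsrl() it into a 64-bit value, optionally adjust it, and wrmsrl() it back. A hedged sketch of that pattern; the bit set here is a placeholder, not the real logic of mmconf-fam10h_64.c:

#include <asm/msr.h>	/* rdmsrl()/wrmsrl(); MSR_* constants come in via msr-index.h */

/* Sketch only: read an MSR, set a placeholder bit, write it back. */
static void example_msr_rmw(void)
{
	u32 address = MSR_FAM10H_MMIO_CONF_BASE;
	u64 val;

	rdmsrl(address, val);	/* val <- current MSR contents */
	val |= 1ULL;		/* placeholder modification */
	wrmsrl(address, val);	/* write the updated value back */
}
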
vsmp_64.c
92 void __iomem *address; local
97 address = early_ioremap(cfg, 8);
98 cap = readl(address);
99 ctl = readl(address + 4);
127 writel(ctl, address + 4);
128 ctl = readl(address + 4);
131 early_iounmap(address, 8);
178 void __iomem *address; local
191 address = early_ioremap(cfg + TOPOLOGY_REGISTER_OFFSET, 4);
192 if (WARN_ON(!address))
[all...]
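
The vsmp_64.c excerpt shows the early-boot MMIO idiom: early_ioremap() a small window, readl()/writel() registers inside it, then early_iounmap() the temporary mapping. A minimal sketch of that sequence; 'cfg' is a placeholder physical address and the 8-byte window size is taken from the excerpt:

#include <linux/bug.h>
#include <asm/io.h>	/* assumption: early_ioremap()/early_iounmap() are visible here */

/* Sketch only: read a capability and a control word from an MMIO window. */
static u32 example_early_mmio_read(unsigned long cfg)
{
	void __iomem *address;
	u32 cap, ctl;

	address = early_ioremap(cfg, 8);
	if (WARN_ON(!address))
		return 0;
	cap = readl(address);
	ctl = readl(address + 4);
	early_iounmap(address, 8);
	return cap & ctl;	/* placeholder use of both values */
}
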
/arch/x86/math-emu/
get_address.c
4 | Get the effective address from an FPU instruction. |
122 static unsigned long vm86_segment(u_char segment, struct address *addr)
137 struct address *addr, long offset)
140 unsigned long base_address, limit, address, seg_top; local
163 address = base_address + offset;
178 (address <= limit) || (address >= seg_top) ? 0 :
179 ((seg_top - address) >= 255 ? 255 : seg_top - address);
182 (address > limi
214 int address = 0; /* Initialized just to stop compiler warnings. */ local
298 int address = 0; /* Default used for mod == 0 */ local
[all...]
/arch/x86/mm/kmemcheck/
error.c
29 unsigned long address; member in struct:kmemcheck_error::__anon3243::__anon3244
112 (void *) e->address);
128 * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
157 unsigned long address, unsigned int size, struct pt_regs *regs)
177 e->address = address;
190 /* Round address down to nearest 16 bytes */
191 shadow_copy = kmemcheck_shadow_lookup(address
197 kmemcheck_show_addr(address);
198 memory_copy = (void *) (address
156 kmemcheck_error_save(enum kmemcheck_shadow state, unsigned long address, unsigned int size, struct pt_regs *regs) argument
[all...]
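
The "round address down to nearest 16 bytes" comment in the error.c excerpt is the standard power-of-two alignment trick. A freestanding restatement (the 16-byte granularity is taken from that comment; kmemcheck itself uses its SHADOW_COPY_SIZE constant):

/* Sketch: clear the low four bits to align down to a 16-byte boundary. */
static inline unsigned long round_down_to_16(unsigned long address)
{
	return address & ~15UL;
}
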
/arch/xtensa/include/asm/
tlbflush.h
149 static inline void invalidate_itlb_mapping (unsigned address) argument
152 if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
156 static inline void invalidate_dtlb_mapping (unsigned address) argument
159 if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
174 * as[31..12] contain the virtual address
/arch/xtensa/mm/
cache.c
199 void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, argument
202 /* Note that we have to use the 'alias' address to avoid multi-hit */
205 unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
/arch/alpha/boot/
bootpz.c
57 * Find a physical address of a virtual object..
59 * This is easy using the virtual page table address.
64 find_pa(unsigned long address) argument
68 result = VPTB[address >> 13];
71 result |= address & 0x1fff;
110 * itself (through three levels) at virtual address 0x200802000.
136 * a1 = return address, but we give the asm the vaddr of the PCB
175 /* Calculate the kernel image address based on the end of the BOOTP
182 /* Round address to next higher page boundary. */
193 BOOT_ADDR Virtual address a
[all...]
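
find_pa() in the bootpz.c excerpt resolves a physical address through the virtual page table: the PTE index is address >> 13 (8 KB pages) and the low 13 bits of the address are the in-page offset. A freestanding restatement of that arithmetic, assuming the Alpha convention of keeping the PFN in the upper 32 bits of the PTE:

#define ALPHA_PAGE_SHIFT	13				/* 8 KB pages */
#define ALPHA_PAGE_MASK		((1UL << ALPHA_PAGE_SHIFT) - 1)

/* Sketch: combine the PFN from a PTE with the page offset of 'address'. */
static unsigned long example_pa_from_pte(unsigned long pte, unsigned long address)
{
	unsigned long pfn = pte >> 32;	/* assumption: PFN lives in PTE bits 63..32 */

	return (pfn << ALPHA_PAGE_SHIFT) | (address & ALPHA_PAGE_MASK);
}
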
/arch/arc/kernel/
kgdb.c
81 unsigned long address[2]; member in struct:single_step_data_t
92 memcpy((void *) single_step_data.address[i],
96 flush_icache_range(single_step_data.address[i],
97 single_step_data.address[i] +
104 static void place_trap(unsigned long address, void *save) argument
106 memcpy(save, (void *) address, BREAK_INSTR_SIZE);
107 memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
109 flush_icache_range(address, address + BREAK_INSTR_SIZE);
117 &single_step_data.address[
[all...]
kprobes.c
28 /* Attempt to probe at unaligned address */
270 * return to the same address and execute it
459 * real return address, and all the rest will point to
475 * This is the real return address. Any other
520 void trap_is_kprobe(unsigned long address, struct pt_regs *regs) argument
522 notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
troubleshoot.c
82 static void show_faulting_vma(unsigned long address, char *buf) argument
95 vma = find_vma(active_mm, address);
100 if (vma && (vma->vm_start <= address)) {
112 address : address - vma->vm_start,
123 unsigned long address; local
127 /* For Data fault, this is data address not instruction addr */
128 address = current->thread.fault_address;
138 address, regs->ret);
149 pr_cont("Misaligned r/w from 0x%08lx\n", address);
213 show_kernel_fault_diag(const char *str, struct pt_regs *regs, unsigned long address) argument
[all...]
/arch/arm/include/asm/
hw_breakpoint.h
21 u32 address; member in struct:arch_hw_breakpoint
/arch/arm/mm/
flush.c
168 * processes address space. Really, we want to allow our "user
353 * address for highmem pages, and
390 * userspace address only.
406 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, argument
410 VM_BUG_ON(address & ~PMD_MASK);
411 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
/arch/arm64/include/asm/
hw_breakpoint.h
30 u64 address; member in struct:arch_hw_breakpoint
/arch/avr32/mm/
tlb.c
63 static void update_dtlb(unsigned long address, pte_t pte) argument
74 tlbehi |= address & MMU_VPN_MASK;
104 unsigned long address, pte_t *ptep)
113 update_dtlb(address, *ptep);
103 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
/arch/cris/include/asm/
io.h
27 static inline unsigned long virt_to_phys(volatile void * address) argument
29 return __pa(address);
32 static inline void * phys_to_virt(unsigned long address) argument
34 return __va(address);
50 * IO bus memory addresses are also 1:1 with the physical address
/arch/ia64/include/asm/
tlb.h
22 * unmapping a portion of the virtual address space, these hooks are called according to
25 * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
30 * tlb_remove_tlb_entry(tlb, pte, address);
38 * tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM
101 * Tearing down the entire address space. This happens both as a result
123 /* flush the address range from the tlb: */
125 /* now flush the virt. page-table area mapping the address range: */
147 * Flush the TLB for address range START to END and, if not in fast mode, release the
243 * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
247 __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address) argument
[all...]
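
The tlb.h comment in the excerpt documents the mmu_gather unmap protocol. A sketch of that call sequence, using the hook names and argument order from the comment itself (the exact signatures have changed across kernel versions); example_lookup_pte() is a hypothetical page-table-walk helper, not a kernel API:

#include <linux/mm.h>
#include <asm/tlb.h>

/* Hypothetical helper: resolve the pte mapped at 'address' (walk elided). */
extern pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long address);

/* Sketch only: tear down the mappings of [start, end) within one vma. */
static void example_unmap_range(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;
	unsigned long address;

	tlb_gather_mmu(&tlb, mm, start, end);	/* start unmap for address space MM */
	tlb_start_vma(&tlb, vma);
	for (address = start; address < end; address += PAGE_SIZE) {
		pte_t *ptep = example_lookup_pte(mm, address);

		if (ptep)
			tlb_remove_tlb_entry(&tlb, ptep, address);
	}
	tlb_end_vma(&tlb, vma);
	tlb_finish_mmu(&tlb, start, end);	/* flush the TLB and free gathered pages */
}
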
/arch/m32r/mm/
fault.c
53 * This routine handles page faults. It determines the address,
60 * address : M32R MMU MDEVA reg. (Operand ACE)
75 unsigned long address)
108 if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
128 * address space. Luckily the kernel only validly references user
134 * Attempt to lock the address space, if we cannot we then validate the
135 * source. If this is invalid we can skip the address space check,
145 vma = find_vma(mm, address);
148 if (vma->vm_start <= address)
160 if (address
74 do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) argument
[all...]
/arch/metag/include/asm/
cacheflush.h
19 * normally accessed via the linear address range supplied. The region flushed
20 * must either lie in local or global address space determined by the top bit of
21 * the pStart address. If Bytes is >= 4K then the whole of the related cache
39 /* flush the entire user address space referenced in this mm structure */
117 /* Push n pages at kernel virtual address and clear the icache */
118 static inline void flush_icache_range(unsigned long address, argument
124 metag_code_cache_flush((void *) address, endaddr - address);
132 * fetched from this address, perhaps a previous sigtramp.
147 * Perform a single specific CACHEWD operation on an address, maskin
[all...]
/arch/metag/mm/
init.c
44 static void insert_gateway_page(pgd_t *pgd, unsigned long address) argument
52 pud = pud_offset(pgd, address);
55 pmd = pmd_offset(pud, address);
61 pte = pte_offset_kernel(pmd, address);
68 unsigned long address = USER_GATEWAY_PAGE; local
69 int offset = pgd_index(address);
75 insert_gateway_page(pgd, address);
82 * copy just the user address range and not the gateway page
86 insert_gateway_page(pgd, address);
91 gateway_page += (address
[all...]

Completed in 504 milliseconds
