Searched defs:address (Results 226 - 250 of 329) sorted by relevance


/arch/powerpc/kernel/
btext.c
75 * The display is mapped to virtual address 0xD0000000, rather
117 * call before the logical address becomes unusable
120 unsigned long address)
126 logicalDisplayBase = (unsigned char *)address;
127 dispDeviceBase = (unsigned char *)address;
175 unsigned long address = 0; local
206 prop = of_get_property(np, "address", NULL);
208 address = *prop;
213 if (address == 0)
220 dispDeviceBase = (unsigned char *)address;
119 btext_setup_display(int width, int height, int depth, int pitch, unsigned long address) argument
[all...]
/arch/powerpc/mm/
mem.c
206 * Add 1 additional page in case the address isn't page-aligned.
506 * to establish the GOT address. Until recently the GOT was
537 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, argument
548 if (!pte_young(*ptep) || address >= TASK_SIZE)
565 hash_preload(vma->vm_mm, address, access, trap);
570 book3e_hugetlb_preload(vma, address, *ptep);
605 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
pgtable_32.c
99 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) argument
115 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) argument
192 * Choose an address to map it to.
201 * If the address lies within the first 16 MB, assume it's in ISA
231 * same virt address (and this is contiguous).
356 * a virtual address in a context.
395 unsigned long address; local
398 address = (unsigned long)page_address(page);
400 if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
446 unsigned long address = __fix_to_virt(idx); local
[all...]
slice.c
2 * address space "slices" (meta-segments) support
587 * an address space from real 64K pages to combo 4K pages (typically
648 void slice_set_psize(struct mm_struct *mm, unsigned long address, argument
656 if (address < SLICE_LOW_TOP) {
657 i = GET_LOW_SLICE_INDEX(address);
663 i = GET_HIGH_SLICE_INDEX(address);
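The slice.c matches above pick a slice index by address range: GET_LOW_SLICE_INDEX() for addresses below SLICE_LOW_TOP, GET_HIGH_SLICE_INDEX() otherwise. A minimal standalone sketch of that split follows; the shift values (256MB low slices below 4GB, 1TB high slices) are assumptions modeled on powerpc of this era, not taken verbatim from slice.c:

    #include <stdio.h>

    #define SLICE_LOW_SHIFT   28           /* assumed: 256MB low slices */
    #define SLICE_HIGH_SHIFT  40           /* assumed: 1TB high slices */
    #define SLICE_LOW_TOP     (1ULL << 32) /* assumed low/high boundary */

    static unsigned int slice_index(unsigned long long address)
    {
        if (address < SLICE_LOW_TOP)
            return (unsigned int)(address >> SLICE_LOW_SHIFT);
        return (unsigned int)(address >> SLICE_HIGH_SHIFT);
    }

    int main(void)
    {
        printf("%u\n", slice_index(0x30000000ULL));    /* low slice 3 */
        printf("%u\n", slice_index(0x20000000000ULL)); /* high slice 2 */
        return 0;
    }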
tlb_nohash.c
390 void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) argument
395 unsigned long start = address & PMD_MASK;
396 unsigned long end = address + PMD_SIZE;
409 unsigned long rid = (address & rmask) | 0x1000000000000000ul;
410 unsigned long vpte = address & ~rmask;
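tlb_flush_pgtable() above rounds the faulting address down to a PMD boundary (address & PMD_MASK) and covers one PMD-sized extent past the address. A self-contained sketch of that rounding, assuming a 2MB PMD purely for illustration:

    #include <stdio.h>

    #define PMD_SHIFT 21                  /* assumed 2MB PMD, illustration only */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long address = 0x12345678UL;
        unsigned long start = address & PMD_MASK; /* round down to PMD boundary */
        unsigned long end   = address + PMD_SIZE; /* one PMD past the address */
        printf("flush %#lx .. %#lx\n", start, end);
        return 0;
    }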
/arch/powerpc/platforms/cell/
spu_manage.c
116 unsigned long address; member in struct:address_prop
125 return ioremap(prop->address, prop->len);
/arch/powerpc/sysdev/
fsl_msi.c
152 u64 address; /* Physical address of the MSIIR */ local
156 /* If the msi-address-64 property exists, then use it */
157 reg = of_get_property(hose->dn, "msi-address-64", &len);
159 address = be64_to_cpup(reg);
161 address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;
163 msg->address_lo = lower_32_bits(address);
164 msg->address_hi = upper_32_bits(address);
/arch/s390/include/asm/
eadm.h
83 u64 address; member in struct:scm_device
ptrace.h
90 unsigned long start; /* PER starting address */
91 unsigned long end; /* PER ending address */
99 unsigned long address; /* PER address */ member in struct:per_event
108 unsigned long cr10; /* PER starting address */
109 unsigned long cr11; /* PER ending address */
111 unsigned long starting_addr; /* User specified start address */
112 unsigned long ending_addr; /* User specified end address */
114 unsigned long address; /* PER trap instruction address */ member in struct:per_struct_kernel
[all...]
/arch/s390/mm/
fault.c
106 * Returns the address space associated with the fault.
137 static void dump_pagetable(unsigned long asce, unsigned long address) argument
144 table = table + ((address >> 53) & 0x7ff);
153 table = table + ((address >> 42) & 0x7ff);
162 table = table + ((address >> 31) & 0x7ff);
171 table = table + ((address >> 20) & 0x7ff);
179 table = table + ((address >> 12) & 0xff);
192 static void dump_pagetable(unsigned long asce, unsigned long address) argument
197 table = table + ((address >> 20) & 0x7ff);
204 table = table + ((address >> 1
292 unsigned long address; local
409 unsigned long address; local
[all...]
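The 64-bit dump_pagetable() above walks the s390 tables by slicing the faulting address into per-level indices: four 11-bit indices (shifts 53, 42, 31, 20) plus an 8-bit page-table index (shift 12). A userspace sketch of the same index extraction; the level names follow the usual s390 region/segment/page terminology and are labels of convenience here:

    #include <stdio.h>

    /* Per-level index extraction, mirroring the shifts and masks in the
     * 64-bit dump_pagetable() hit above. */
    static void print_indices(unsigned long long address)
    {
        printf("region-1st %llu\n", (address >> 53) & 0x7ff);
        printf("region-2nd %llu\n", (address >> 42) & 0x7ff);
        printf("region-3rd %llu\n", (address >> 31) & 0x7ff);
        printf("segment    %llu\n", (address >> 20) & 0x7ff);
        printf("page       %llu\n", (address >> 12) & 0xff);
    }

    int main(void)
    {
        print_indices(0x123456789abcULL);
        return 0;
    }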
/arch/sh/kernel/
traps_64.c
87 __u64 *address)
119 address is always accessible (and if not, just fault when the
129 *address = addr;
137 static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) argument
141 p = (unsigned char *) (int) address;
153 static void misaligned_kernel_word_store(__u64 address, __u64 value) argument
157 p = (unsigned char *) (int) address;
174 __u64 address; local
177 displacement_not_indexed, width_shift, &address);
185 if (!access_ok(VERIFY_READ, (unsigned long) address,
83 generate_and_check_address(struct pt_regs *regs, insn_size_t opcode, int displacement_not_indexed, int width_shift, __u64 *address) argument
248 __u64 address; local
320 __u64 address; local
392 __u64 address; local
[all...]
/arch/sparc/kernel/
signal32.c
327 * Return an always-bogus address instead so we will die with SIGSEGV.
350 * side linear mapping of the physical address backing the user
353 static void flush_signal_insns(unsigned long address) argument
374 pgdp = pgd_offset(current->mm, address);
377 pudp = pud_offset(pgdp, address);
380 pmdp = pmd_offset(pudp, address);
384 ptep = pte_offset_map(pmdp, address);
394 "r" (address & (PAGE_SIZE - 1))
522 unsigned long address = ((unsigned long)&(sf->insns[0])); local
530 flush_signal_insns(address);
653 unsigned long address = ((unsigned long)&(sf->insns[0])); local
[all...]
unaligned_64.c
266 unsigned long address; local
268 address = compute_effective_address(regs, insn,
270 if (address < PAGE_SIZE) {
276 printk(KERN_ALERT " at virtual address %016lx\n",address);
/arch/tile/include/uapi/arch/
sim.h
312 unsigned long address,
318 address, size, access_mask, user_data);
324 unsigned long address,
330 address, size, access_mask, user_data);
346 * The address of the watchpoint that fired (this is the address
347 * passed to sim_add_watchpoint, not an address within that range
350 unsigned long address; member in struct:SimQueryWatchpointStatus
365 "=R01" (status.address),
311 sim_add_watchpoint(unsigned int process_id, unsigned long address, unsigned long size, unsigned int access_mask, unsigned long user_data) argument
323 sim_remove_watchpoint(unsigned int process_id, unsigned long address, unsigned long size, unsigned int access_mask, unsigned long user_data) argument
/arch/tile/kernel/
stack.c
42 /* Is address on the specified kernel stack? */
52 static bool read_memory_func(void *result, unsigned long address, argument
58 if (address == 0)
60 if (__kernel_text_address(address)) {
62 } else if (address >= PAGE_OFFSET) {
64 if (!in_kernel_stack(kbt, address))
67 return 0; /* can't read from other user address spaces */
71 (void __user __force *)address,
225 * so we will allow reads of that address range.
284 unsigned long address,
283 describe_addr(struct KBacktraceIterator *kbt, unsigned long address, int have_mmap_sem, char *buf, size_t bufsize) argument
400 unsigned long address = kbt->it.pc; local
[all...]
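read_memory_func() above decides how the backtracer may read each address: NULL fails, kernel text is read directly, other kernel addresses only if they lie on the stack being traced, and everything else goes through the user-copy path. A toy classifier with the same ordering; PAGE_OFFSET and the text range below are invented for the sketch:

    #include <stdio.h>

    #define PAGE_OFFSET 0xC0000000UL /* invented for the sketch */

    /* Invented stand-in for __kernel_text_address(). */
    static int kernel_text(unsigned long a)
    {
        return a >= 0xC0008000UL && a < 0xC0400000UL;
    }

    static const char *classify(unsigned long address)
    {
        if (address == 0)
            return "invalid (NULL)";
        if (kernel_text(address))
            return "kernel text: read directly";
        if (address >= PAGE_OFFSET)
            return "kernel data: only if on the stack being traced";
        return "user address: copy-from-user path";
    }

    int main(void)
    {
        printf("%s\n", classify(0x0));
        printf("%s\n", classify(0xC0010000UL));
        printf("%s\n", classify(0x00400000UL));
        return 0;
    }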
/arch/tile/mm/
pgtable.c
87 * shatter_huge_page() - ensure a given address is mapped by a small page.
233 struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address, argument
281 unsigned long address, int order)
333 * address in the given page table. A NULL page table just uses
510 /* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
566 ensures there isn't another iounmap for the same address
567 in parallel. Reuse of the virtual address is prevented by
573 pr_err("iounmap: bad address %p\n", addr);
280 __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte, unsigned long address, int order) argument
/arch/um/kernel/
tlb.c
383 void flush_tlb_page(struct vm_area_struct *vma, unsigned long address) argument
394 address &= PAGE_MASK;
395 pgd = pgd_offset(mm, address);
399 pud = pud_offset(pgd, address);
403 pmd = pmd_offset(pud, address);
407 pte = pte_offset_kernel(pmd, address);
428 err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
431 else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
434 err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
444 printk(KERN_ERR "Failed to flush page for address
448 pgd_offset_proc(struct mm_struct *mm, unsigned long address) argument
453 pud_offset_proc(pgd_t *pgd, unsigned long address) argument
458 pmd_offset_proc(pud_t *pud, unsigned long address) argument
463 pte_offset_proc(pmd_t *pmd, unsigned long address) argument
[all...]
/arch/x86/include/asm/
pci_x86.h
132 u64 address; member in struct:pci_mmcfg_region
/arch/x86/kernel/
mpparse.c
175 pr_err("MPTABLE: null local APIC address!\n");
403 * local APIC has default address
538 * local APIC has default address
607 unsigned int address; local
638 address = get_bios_ebda();
639 if (address)
640 smp_scan_config(address, 0x400);
/arch/x86/pci/
pcbios.c
63 unsigned long entry; /* 32 bit physical address */
73 * Physical address of the service directory. I don't know if we're
80 unsigned long address; member in struct:__anon3250
91 unsigned long address; /* %ebx */ local
99 "=b" (address),
109 return address + entry;
121 unsigned long address; member in struct:__anon3251
134 pci_indirect.address = pcibios_entry + PAGE_OFFSET;
323 * directory by scanning the permissible address range from
358 bios32_indirect.address
[all...]
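The pcbios.c code scans a fixed physical range for the BIOS32 service directory; per the BIOS32 convention, a candidate header is accepted only if all of its bytes sum to zero modulo 256. A sketch of that 8-bit checksum rule (the toy header below is fabricated, not a real BIOS32 directory):

    #include <stdint.h>
    #include <stdio.h>

    /* BIOS32 rule: all header bytes must sum to 0 modulo 256. */
    static int checksum_ok(const uint8_t *p, size_t len)
    {
        uint8_t sum = 0;
        for (size_t i = 0; i < len; i++)
            sum += p[i];
        return sum == 0;
    }

    int main(void)
    {
        uint8_t hdr[16] = { '_', '3', '2', '_' }; /* toy header, not real data */
        unsigned int sum = 0;
        for (int i = 0; i < 15; i++)
            sum += hdr[i];
        hdr[15] = (uint8_t)(0x100 - (sum & 0xff)); /* fix-up byte */
        printf("checksum ok: %d\n", checksum_ok(hdr, sizeof hdr));
        return 0;
    }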
/arch/alpha/include/asm/
io.h
63 static inline unsigned long virt_to_phys(void *address) argument
65 return (unsigned long)address - IDENT_ADDR;
68 static inline void * phys_to_virt(unsigned long address) argument
70 return (void *) (address + IDENT_ADDR);
73 static inline unsigned long virt_to_phys(void *address) argument
75 unsigned long phys = (unsigned long)address;
81 /* Crop to the physical address width of the processor. */
87 static inline void * phys_to_virt(unsigned long address) argument
89 return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
100 /* Maximum PIO space address supporte
114 virt_to_bus(void *address) argument
122 bus_to_virt(unsigned long address) argument
[all...]
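The alpha io.h helpers above translate between kernel virtual and physical addresses by adding or subtracting a fixed identity-map base. A standalone round-trip sketch; IDENT_ADDR matches alpha's usual base but is hard-coded only for illustration, and the sketch assumes a 64-bit build:

    #include <stdio.h>

    #define IDENT_ADDR 0xfffffc0000000000UL /* illustrative identity-map base */

    static unsigned long virt_to_phys(void *address)
    {
        return (unsigned long)address - IDENT_ADDR;
    }

    static void *phys_to_virt(unsigned long address)
    {
        return (void *)(address + IDENT_ADDR);
    }

    int main(void)
    {
        unsigned long phys = 0x12345000UL;
        void *virt = phys_to_virt(phys);
        printf("round trip ok: %d\n", virt_to_phys(virt) == phys);
        return 0;
    }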
/arch/arc/mm/
tlb.c
344 * Delete TLB entry in MMU for a given page (??? address)
442 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
478 tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
480 address &= PAGE_MASK;
490 pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
682 void do_tlb_overlap_fault(unsigned long cause, unsigned long address, argument
/arch/arm/mach-shmobile/
pm-sh7372.c
194 static void sh7372_set_reset_vector(unsigned long address) argument
197 __raw_writel(address, SBAR);
356 /* pass physical address of cpu_resume() to assembly resume code */
/arch/arm64/kernel/
armv8_deprecated.c
274 * Rn = address
335 static int emulate_swpX(unsigned int address, unsigned int *data, argument
340 if ((type != TYPE_SWPB) && (address & 0x3)) {
341 /* SWP to unaligned address not permitted */
350 __user_swpb_asm(*data, address, res, temp);
352 __user_swp_asm(*data, address, res, temp);
370 u32 destreg, data, type, address = 0; local
393 address = (u32)regs->user_regs.regs[rn];
398 rn, address, destreg,
402 if (!access_ok(VERIFY_WRITE, (address
[all...]
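emulate_swpX() above rejects a word-sized SWP on a non-4-byte-aligned address, the (type != TYPE_SWPB) && (address & 0x3) test, while byte-sized SWPB is allowed at any alignment. The check in isolation:

    #include <stdio.h>

    /* SWP (word) needs 4-byte alignment; SWPB (byte) does not. */
    static int swp_alignment_ok(unsigned int address, int is_swpb)
    {
        return is_swpb || (address & 0x3) == 0;
    }

    int main(void)
    {
        printf("%d\n", swp_alignment_ok(0x1000, 0)); /* aligned word: 1 */
        printf("%d\n", swp_alignment_ok(0x1001, 0)); /* unaligned word: 0 */
        printf("%d\n", swp_alignment_ok(0x1001, 1)); /* byte swap: 1 */
        return 0;
    }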
/arch/ia64/mm/
init.c
99 * This performs some platform-dependent address space initialization.
133 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
212 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) argument
223 pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */
226 pud = pud_alloc(&init_mm, pgd, address);
229 pmd = pmd_alloc(&init_mm, pud, address);
232 pte = pte_alloc_kernel(pmd, address);
325 * address space. The IA-64 architecture guarantees that at least 50 bits of
326 * virtual address space are implemented but if we pick a large enough page size
327 * (e.g., 64KB), the mapped address spac
439 unsigned long address, start_page, end_page; local
[all...]

Completed in 417 milliseconds
