/*
 *  linux/arch/frv/mm/fault.c
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * - Written by David Howells (dhowells@redhat.com)
 * - Derived from arch/m68knommu/mm/fault.c
 * - Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
 * - Copyright (C) 2000  Lineo, Inc.  (www.lineo.com)
 *
 * Based on:
 *
 *  linux/arch/m68k/mm/fault.c
 *
 * Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/gdb-stub.h>

/*****************************************************************************/
/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long _pme, lrai, lrad, fixup;
	unsigned long flags = 0;
	siginfo_t info;
	pgd_t *pge;
	pud_t *pue;
	pte_t *pte;
	int fault;

#if 0
	const char *atxc[16] = {
		[0x0] = "mmu-miss", [0x8] = "multi-dat", [0x9] = "multi-sat",
		[0xa] = "tlb-miss", [0xc] = "privilege", [0xd] = "write-prot",
	};

	printk("do_page_fault(%d,%lx [%s],%lx)\n",
	       datammu, esr0, atxc[esr0 >> 20 & 0xf], ear0);
#endif

	mm = current->mm;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was a page not present (invalid) error
	 */
	if (!user_mode(__frame) && (esr0 & ESR0_ATXC) == ESR0_ATXC_AMRTLB_MISS) {
		if (ear0 >= VMALLOC_START && ear0 < VMALLOC_END)
			goto kernel_pte_fault;
		if (ear0 >= PKMAP_BASE && ear0 < PKMAP_END)
			goto kernel_pte_fault;
	}

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(__frame))
		flags |= FAULT_FLAG_USER;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, ear0);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ear0)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(__frame)) {
		/*
		 * Accessing the stack below the stack pointer is always a
		 * bug.  A slack of two pages below SP is permitted here for
		 * accesses made before a stack pointer adjustment becomes
		 * visible.
		 */
		if ((ear0 & PAGE_MASK) + 2 * PAGE_SIZE < __frame->sp) {
#if 0
			printk("[%d] ### Access below stack @%lx (sp=%lx)\n",
			       current->pid, ear0, __frame->sp);
			show_registers(__frame);
			printk("[%d] ### Code: [%08lx] %02x %02x %02x %02x %02x %02x %02x %02x\n",
			       current->pid,
			       __frame->pc,
			       ((u8*)__frame->pc)[0],
			       ((u8*)__frame->pc)[1],
			       ((u8*)__frame->pc)[2],
			       ((u8*)__frame->pc)[3],
			       ((u8*)__frame->pc)[4],
			       ((u8*)__frame->pc)[5],
			       ((u8*)__frame->pc)[6],
			       ((u8*)__frame->pc)[7]
			       );
#endif
			goto bad_area;
		}
	}

	if (expand_stack(vma, ear0))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
 good_area:
	info.si_code = SEGV_ACCERR;
	switch (esr0 & ESR0_ATXC) {
	default:
		/* handle write to write protected page */
	case ESR0_ATXC_WP_EXCEP:
#ifdef TEST_VERIFY_AREA
		if (!(user_mode(__frame)))
			printk("WP fault at %08lx\n", __frame->pc);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;

		/* handle read from protected page */
	case ESR0_ATXC_PRIV_EXCEP:
		goto bad_area;

		/* handle read, write or exec on absent page
		 * - can't support write without permitting read
		 * - don't support execute without permitting read and vice-versa
		 */
	case ESR0_ATXC_AMRTLB_MISS:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, ear0, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
 bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(__frame)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) ear0;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}

 no_context:
	/* are we prepared to handle this kernel fault? */
	if ((fixup = search_exception_table(__frame->pc)) != 0) {
		__frame->pc = fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (ear0 < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual addr %08lx\n", ear0);
	printk("  PC  : %08lx\n", __frame->pc);
	printk("  EXC : esr0=%08lx ear0=%08lx\n", esr0, ear0);

	/* look up the faulting address in the instruction and data MMUs for
	 * diagnostic purposes */
	asm("lrai %1,%0,#1,#0,#0" : "=&r"(lrai) : "r"(ear0));
	asm("lrad %1,%0,#1,#0,#0" : "=&r"(lrad) : "r"(ear0));

	printk(KERN_ALERT "  LRAI: %08lx\n", lrai);
	printk(KERN_ALERT "  LRAD: %08lx\n", lrad);

	__break_hijack_kernel_event();

	pge = pgd_offset(current->mm, ear0);
	pue = pud_offset(pge, ear0);
	_pme = pue->pue[0].ste[0];

	printk(KERN_ALERT "  PGE : %8p { PME %08lx }\n", pge, _pme);

	if (_pme & xAMPRx_V) {
		unsigned long dampr, damlr, val;

		/* temporarily map the page-table page through DAMPR2 so that
		 * the faulting PTE can be read out, then restore DAMPR2 */
		asm volatile("movsg dampr2,%0 ! movgs %2,dampr2 ! movsg damlr2,%1"
			     : "=&r"(dampr), "=r"(damlr)
			     : "r" (_pme | xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V)
			     );

		pte = (pte_t *) damlr + __pte_index(ear0);
		val = pte_val(*pte);

		asm volatile("movgs %0,dampr2" :: "r" (dampr));

		printk(KERN_ALERT "  PTE : %8p { %08lx }\n", pte, val);
	}

	die_if_kernel("Oops\n");
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
 out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(__frame))
		goto no_context;
	pagefault_out_of_memory();
	return;

 do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) ear0;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(__frame))
		goto no_context;
	return;

/*
 * The fault was caused by a kernel PTE (such as installed by vmalloc or kmap)
 */
 kernel_pte_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int index = pgd_index(ear0);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) __get_TTBR();
		pgd = (pgd_t *)__va(pgd) + index;
		pgd_k = ((pgd_t *)(init_mm.pgd)) + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		//set_pgd(pgd, *pgd_k); /////// gcc ICE's on this line

		pud_k = pud_offset(pgd_k, ear0);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd_k = pmd_offset(pud_k, ear0);
		if (!pmd_present(*pmd_k))
			goto no_context;

		pud = pud_offset(pgd, ear0);
		pmd = pmd_offset(pud, ear0);
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, ear0);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
} /* end do_page_fault() */