traps_32.c revision fa43972fab24a3c050e880a7831f9378c6cebc0b
/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                 Copyright (C) 2000 Philipp Rumpf
 *                 Copyright (C) 2000 David Howells
 *                 Copyright (C) 2002 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/fpu.h>

#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
/* Hand kernel-mode faults to the remote kgdb stub, if one is attached. */
#define CHK_REMOTE_DEBUG(regs)			\
{						\
	if (kgdb_debug_hook && !user_mode(regs))\
		(*kgdb_debug_hook)(regs);	\
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif

/* Exception vector numbers differ between SH-2/2A and SH-3/4 parts. */
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST	4
# define TRAP_ILLEGAL_SLOT_INST	6
# define TRAP_ADDRESS_ERROR	9
# ifdef CONFIG_CPU_SH2A
# define TRAP_FPU_ERROR		13
# define TRAP_DIVZERO_ERROR	17
# define TRAP_DIVOVF_ERROR	18
# endif
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif

/*
 * Hex-dump memory from 'bottom' to 'top' to the console, eight 32-bit
 * words per row.  The start is rounded down to a 32-byte boundary;
 * positions in a row that fall outside [bottom, top) are printed as
 * blanks.  Stops at the first word __get_user() cannot read.
 */
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	for (p = bottom & ~31; p < top; ) {
		/* only the low 16 address bits are shown as the row label */
		printk("%04lx: ", p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				printk("         ");
			else {
				if (__get_user(val, (unsigned int __user *)p)) {
					printk("\n");
					return;
				}
				printk("%08x ", val);
			}
		}
		printk("\n");
	}
}

/* Serializes concurrent oopses so their output doesn't interleave. */
static DEFINE_SPINLOCK(die_lock);

/*
 * Kernel oops: dump registers, modules and the kernel stack, taint the
 * kernel, then kill the current task (or panic if in interrupt context
 * or panic_on_oops is set).  Does not return.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);

	CHK_REMOTE_DEBUG(regs);
	print_modules();
	show_regs(regs);

	printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
			task_pid_nr(current), task_stack_page(current) + 1);

	if (!user_mode(regs) || in_interrupt())
		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
			 (unsigned long)task_stack_page(current));

	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

/* Oops only for faults taken in kernel mode; user-mode faults are ignored. */
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
				 long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

/*
 * try and fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
 *   (die() does not return, so an unfixable kernel fault never reaches
 *   the -EFAULT return)
 */
static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return 0;
		}
		die(str, regs, err);
	}
	return -EFAULT;
}

/*
 * Sign-extend, in place, the low-order 'count' bytes of the 32-bit
 * value stored at 'dst' (count is 1 or 2; byte order selects which
 * end of the word holds the value).
 */
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
	if ((count == 1) && dst[0] & 0x80) {
		dst[1] = 0xff;
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
	if ((count == 2) && dst[1] & 0x80) {
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
#else
	if ((count == 1) && dst[3] & 0x80) {
		dst[2] = 0xff;
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
	if ((count == 2) && dst[2] & 0x80) {
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
#endif
}

/* mem_access ops for userspace addresses: plain copy_{from,to}_user. */
static struct mem_access user_mem_access = {
	copy_from_user,
	copy_to_user,
};

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
				struct mem_access *ma)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;
	unsigned char __user *srcu, *dstu;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];

	/* low two opcode bits encode the transfer size: 1, 2 or 4 bytes */
	count = 1<<(instruction&3);

	ret = -EFAULT;
	switch (instruction>>12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			srcu = (unsigned char __user *)*rm;
			srcu += regs->regs[0];
			dst = (unsigned char *)rn;
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			/* big-endian: value lives in the high-address bytes */
			dst += 4-count;
#endif
			if (ma->from(dst, srcu, count))
				goto fetch_fault;

			sign_extend(count, dst);
		} else {
			/* to memory */
			src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4-count;
#endif
			dstu = (unsigned char __user *)*rn;
			dstu += regs->regs[0];

			if (ma->to(dstu, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char*) rm;
		dstu = (unsigned char __user *)*rn;
		dstu += (instruction&0x000F)<<2;

		if (ma->to(dstu, src, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char*) rm;
		dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4-count;
#endif
		if (ma->to(dstu, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		srcu = (unsigned char __user *)*rm;
		srcu += (instruction & 0x000F) << 2;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

		if (ma->from(dst, srcu, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
		srcu = (unsigned char __user *)*rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 4-count;
#endif
		if (ma->from(dst, srcu, count))
			goto fetch_fault;
		sign_extend(count, dst);
		ret = 0;
		break;

	case 8:
		switch ((instruction&0xFF00)>>8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
			dstu += (instruction & 0x000F) << 1;

			if (ma->to(dstu, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			srcu = (unsigned char __user *)*rm;
			srcu += (instruction & 0x000F) << 1;
			dst = (unsigned char *) &regs->regs[0];
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif
			if (ma->from(dst, srcu, 2))
				goto fetch_fault;
			sign_extend(2, dst);
			ret = 0;
			break;
		}
		break;
	}
	return ret;

 fetch_fault:
	/* Argh. Address not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped
	 */
	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 *   (PC + instruction_size() of the branch, for variable-size opcodes)
 */
static inline int handle_delayslot(struct pt_regs *regs,
				   opcode_t old_instruction,
				   struct mem_access *ma)
{
	opcode_t instruction;
	void __user *addr = (void __user *)(regs->pc +
		instruction_size(old_instruction));

	if (copy_from_user(&instruction, addr, sizeof(instruction))) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot",
		    regs, 0);
	}

	return handle_unaligned_ins(instruction, regs, ma);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)

/*
 * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
 * opcodes..
372 */ 373 374static int handle_unaligned_notify_count = 10; 375 376int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs, 377 struct mem_access *ma) 378{ 379 u_int rm; 380 int ret, index; 381 382 index = (instruction>>8)&15; /* 0x0F00 */ 383 rm = regs->regs[index]; 384 385 /* shout about the first ten userspace fixups */ 386 if (user_mode(regs) && handle_unaligned_notify_count>0) { 387 handle_unaligned_notify_count--; 388 389 printk(KERN_NOTICE "Fixing up unaligned userspace access " 390 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", 391 current->comm, task_pid_nr(current), 392 (void *)regs->pc, instruction); 393 } 394 395 ret = -EFAULT; 396 switch (instruction&0xF000) { 397 case 0x0000: 398 if (instruction==0x000B) { 399 /* rts */ 400 ret = handle_delayslot(regs, instruction, ma); 401 if (ret==0) 402 regs->pc = regs->pr; 403 } 404 else if ((instruction&0x00FF)==0x0023) { 405 /* braf @Rm */ 406 ret = handle_delayslot(regs, instruction, ma); 407 if (ret==0) 408 regs->pc += rm + 4; 409 } 410 else if ((instruction&0x00FF)==0x0003) { 411 /* bsrf @Rm */ 412 ret = handle_delayslot(regs, instruction, ma); 413 if (ret==0) { 414 regs->pr = regs->pc + 4; 415 regs->pc += rm + 4; 416 } 417 } 418 else { 419 /* mov.[bwl] to/from memory via r0+rn */ 420 goto simple; 421 } 422 break; 423 424 case 0x1000: /* mov.l Rm,@(disp,Rn) */ 425 goto simple; 426 427 case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */ 428 goto simple; 429 430 case 0x4000: 431 if ((instruction&0x00FF)==0x002B) { 432 /* jmp @Rm */ 433 ret = handle_delayslot(regs, instruction, ma); 434 if (ret==0) 435 regs->pc = rm; 436 } 437 else if ((instruction&0x00FF)==0x000B) { 438 /* jsr @Rm */ 439 ret = handle_delayslot(regs, instruction, ma); 440 if (ret==0) { 441 regs->pr = regs->pc + 4; 442 regs->pc = rm; 443 } 444 } 445 else { 446 /* mov.[bwl] to/from memory via r0+rn */ 447 goto simple; 448 } 449 break; 450 451 case 0x5000: /* mov.l @(disp,Rm),Rn */ 452 goto simple; 453 454 case 0x6000: /* 
mov.[bwl] from memory, possibly with post-increment */ 455 goto simple; 456 457 case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */ 458 switch (instruction&0x0F00) { 459 case 0x0100: /* mov.w R0,@(disp,Rm) */ 460 goto simple; 461 case 0x0500: /* mov.w @(disp,Rm),R0 */ 462 goto simple; 463 case 0x0B00: /* bf lab - no delayslot*/ 464 break; 465 case 0x0F00: /* bf/s lab */ 466 ret = handle_delayslot(regs, instruction, ma); 467 if (ret==0) { 468#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) 469 if ((regs->sr & 0x00000001) != 0) 470 regs->pc += 4; /* next after slot */ 471 else 472#endif 473 regs->pc += SH_PC_8BIT_OFFSET(instruction); 474 } 475 break; 476 case 0x0900: /* bt lab - no delayslot */ 477 break; 478 case 0x0D00: /* bt/s lab */ 479 ret = handle_delayslot(regs, instruction, ma); 480 if (ret==0) { 481#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) 482 if ((regs->sr & 0x00000001) == 0) 483 regs->pc += 4; /* next after slot */ 484 else 485#endif 486 regs->pc += SH_PC_8BIT_OFFSET(instruction); 487 } 488 break; 489 } 490 break; 491 492 case 0xA000: /* bra label */ 493 ret = handle_delayslot(regs, instruction, ma); 494 if (ret==0) 495 regs->pc += SH_PC_12BIT_OFFSET(instruction); 496 break; 497 498 case 0xB000: /* bsr label */ 499 ret = handle_delayslot(regs, instruction, ma); 500 if (ret==0) { 501 regs->pr = regs->pc + 4; 502 regs->pc += SH_PC_12BIT_OFFSET(instruction); 503 } 504 break; 505 } 506 return ret; 507 508 /* handle non-delay-slot instruction */ 509 simple: 510 ret = handle_unaligned_ins(instruction, regs, ma); 511 if (ret==0) 512 regs->pc += instruction_size(instruction); 513 return ret; 514} 515 516#ifdef CONFIG_CPU_HAS_SR_RB 517#define lookup_exception_vector(x) \ 518 __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x))) 519#else 520#define lookup_exception_vector(x) \ 521 __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x))) 522#endif 523 524/* 525 * Handle various address error exceptions: 526 * - instruction 
address error: 527 * misaligned PC 528 * PC >= 0x80000000 in user mode 529 * - data address error (read and write) 530 * misaligned data access 531 * access to >= 0x80000000 is user mode 532 * Unfortuntaly we can't distinguish between instruction address error 533 * and data address errors caused by read accesses. 534 */ 535asmlinkage void do_address_error(struct pt_regs *regs, 536 unsigned long writeaccess, 537 unsigned long address) 538{ 539 unsigned long error_code = 0; 540 mm_segment_t oldfs; 541 siginfo_t info; 542 opcode_t instruction; 543 int tmp; 544 545 /* Intentional ifdef */ 546#ifdef CONFIG_CPU_HAS_SR_RB 547 lookup_exception_vector(error_code); 548#endif 549 550 oldfs = get_fs(); 551 552 if (user_mode(regs)) { 553 int si_code = BUS_ADRERR; 554 555 local_irq_enable(); 556 557 /* bad PC is not something we can fix */ 558 if (regs->pc & 1) { 559 si_code = BUS_ADRALN; 560 goto uspace_segv; 561 } 562 563 set_fs(USER_DS); 564 if (copy_from_user(&instruction, (void __user *)(regs->pc), 565 sizeof(instruction))) { 566 /* Argh. Fault on the instruction itself. 567 This should never happen non-SMP 568 */ 569 set_fs(oldfs); 570 goto uspace_segv; 571 } 572 573 tmp = handle_unaligned_access(instruction, regs, 574 &user_mem_access); 575 set_fs(oldfs); 576 577 if (tmp==0) 578 return; /* sorted */ 579uspace_segv: 580 printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " 581 "access (PC %lx PR %lx)\n", current->comm, regs->pc, 582 regs->pr); 583 584 info.si_signo = SIGBUS; 585 info.si_errno = 0; 586 info.si_code = si_code; 587 info.si_addr = (void __user *)address; 588 force_sig_info(SIGBUS, &info, current); 589 } else { 590 if (regs->pc & 1) 591 die("unaligned program counter", regs, error_code); 592 593 set_fs(KERNEL_DS); 594 if (copy_from_user(&instruction, (void __user *)(regs->pc), 595 sizeof(instruction))) { 596 /* Argh. Fault on the instruction itself. 
597 This should never happen non-SMP 598 */ 599 set_fs(oldfs); 600 die("insn faulting in do_address_error", regs, 0); 601 } 602 603 handle_unaligned_access(instruction, regs, &user_mem_access); 604 set_fs(oldfs); 605 } 606} 607 608#ifdef CONFIG_SH_DSP 609/* 610 * SH-DSP support gerg@snapgear.com. 611 */ 612int is_dsp_inst(struct pt_regs *regs) 613{ 614 unsigned short inst = 0; 615 616 /* 617 * Safe guard if DSP mode is already enabled or we're lacking 618 * the DSP altogether. 619 */ 620 if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP)) 621 return 0; 622 623 get_user(inst, ((unsigned short *) regs->pc)); 624 625 inst &= 0xf000; 626 627 /* Check for any type of DSP or support instruction */ 628 if ((inst == 0xf000) || (inst == 0x4000)) 629 return 1; 630 631 return 0; 632} 633#else 634#define is_dsp_inst(regs) (0) 635#endif /* CONFIG_SH_DSP */ 636 637#ifdef CONFIG_CPU_SH2A 638asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, 639 unsigned long r6, unsigned long r7, 640 struct pt_regs __regs) 641{ 642 siginfo_t info; 643 644 switch (r4) { 645 case TRAP_DIVZERO_ERROR: 646 info.si_code = FPE_INTDIV; 647 break; 648 case TRAP_DIVOVF_ERROR: 649 info.si_code = FPE_INTOVF; 650 break; 651 } 652 653 force_sig_info(SIGFPE, &info, current); 654} 655#endif 656 657asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, 658 unsigned long r6, unsigned long r7, 659 struct pt_regs __regs) 660{ 661 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 662 unsigned long error_code; 663 struct task_struct *tsk = current; 664 665#ifdef CONFIG_SH_FPU_EMU 666 unsigned short inst = 0; 667 int err; 668 669 get_user(inst, (unsigned short*)regs->pc); 670 671 err = do_fpu_inst(inst, regs); 672 if (!err) { 673 regs->pc += instruction_size(inst); 674 return; 675 } 676 /* not a FPU inst. */ 677#endif 678 679#ifdef CONFIG_SH_DSP 680 /* Check if it's a DSP instruction */ 681 if (is_dsp_inst(regs)) { 682 /* Enable DSP mode, and restart instruction. 
*/ 683 regs->sr |= SR_DSP; 684 return; 685 } 686#endif 687 688 lookup_exception_vector(error_code); 689 690 local_irq_enable(); 691 CHK_REMOTE_DEBUG(regs); 692 force_sig(SIGILL, tsk); 693 die_if_no_fixup("reserved instruction", regs, error_code); 694} 695 696#ifdef CONFIG_SH_FPU_EMU 697static int emulate_branch(unsigned short inst, struct pt_regs* regs) 698{ 699 /* 700 * bfs: 8fxx: PC+=d*2+4; 701 * bts: 8dxx: PC+=d*2+4; 702 * bra: axxx: PC+=D*2+4; 703 * bsr: bxxx: PC+=D*2+4 after PR=PC+4; 704 * braf:0x23: PC+=Rn*2+4; 705 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4; 706 * jmp: 4x2b: PC=Rn; 707 * jsr: 4x0b: PC=Rn after PR=PC+4; 708 * rts: 000b: PC=PR; 709 */ 710 if ((inst & 0xfd00) == 0x8d00) { 711 regs->pc += SH_PC_8BIT_OFFSET(inst); 712 return 0; 713 } 714 715 if ((inst & 0xe000) == 0xa000) { 716 regs->pc += SH_PC_12BIT_OFFSET(inst); 717 return 0; 718 } 719 720 if ((inst & 0xf0df) == 0x0003) { 721 regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4; 722 return 0; 723 } 724 725 if ((inst & 0xf0df) == 0x400b) { 726 regs->pc = regs->regs[(inst & 0x0f00) >> 8]; 727 return 0; 728 } 729 730 if ((inst & 0xffff) == 0x000b) { 731 regs->pc = regs->pr; 732 return 0; 733 } 734 735 return 1; 736} 737#endif 738 739asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, 740 unsigned long r6, unsigned long r7, 741 struct pt_regs __regs) 742{ 743 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 744 unsigned long error_code; 745 struct task_struct *tsk = current; 746#ifdef CONFIG_SH_FPU_EMU 747 unsigned short inst = 0; 748 749 get_user(inst, (unsigned short *)regs->pc + 1); 750 if (!do_fpu_inst(inst, regs)) { 751 get_user(inst, (unsigned short *)regs->pc); 752 if (!emulate_branch(inst, regs)) 753 return; 754 /* fault in branch.*/ 755 } 756 /* not a FPU inst. 
*/ 757#endif 758 759 lookup_exception_vector(error_code); 760 761 local_irq_enable(); 762 CHK_REMOTE_DEBUG(regs); 763 force_sig(SIGILL, tsk); 764 die_if_no_fixup("illegal slot instruction", regs, error_code); 765} 766 767asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, 768 unsigned long r6, unsigned long r7, 769 struct pt_regs __regs) 770{ 771 struct pt_regs *regs = RELOC_HIDE(&__regs, 0); 772 long ex; 773 774 lookup_exception_vector(ex); 775 die_if_kernel("exception", regs, ex); 776} 777 778#if defined(CONFIG_SH_STANDARD_BIOS) 779void *gdb_vbr_vector; 780 781static inline void __init gdb_vbr_init(void) 782{ 783 register unsigned long vbr; 784 785 /* 786 * Read the old value of the VBR register to initialise 787 * the vector through which debug and BIOS traps are 788 * delegated by the Linux trap handler. 789 */ 790 asm volatile("stc vbr, %0" : "=r" (vbr)); 791 792 gdb_vbr_vector = (void *)(vbr + 0x100); 793 printk("Setting GDB trap vector to 0x%08lx\n", 794 (unsigned long)gdb_vbr_vector); 795} 796#endif 797 798void __cpuinit per_cpu_trap_init(void) 799{ 800 extern void *vbr_base; 801 802#ifdef CONFIG_SH_STANDARD_BIOS 803 if (raw_smp_processor_id() == 0) 804 gdb_vbr_init(); 805#endif 806 807 /* NOTE: The VBR value should be at P1 808 (or P2, virtural "fixed" address space). 809 It's definitely should not in physical address. 
*/ 810 811 asm volatile("ldc %0, vbr" 812 : /* no output */ 813 : "r" (&vbr_base) 814 : "memory"); 815} 816 817void *set_exception_table_vec(unsigned int vec, void *handler) 818{ 819 extern void *exception_handling_table[]; 820 void *old_handler; 821 822 old_handler = exception_handling_table[vec]; 823 exception_handling_table[vec] = handler; 824 return old_handler; 825} 826 827void __init trap_init(void) 828{ 829 set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); 830 set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst); 831 832#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \ 833 defined(CONFIG_SH_FPU_EMU) 834 /* 835 * For SH-4 lacking an FPU, treat floating point instructions as 836 * reserved. They'll be handled in the math-emu case, or faulted on 837 * otherwise. 838 */ 839 set_exception_table_evt(0x800, do_reserved_inst); 840 set_exception_table_evt(0x820, do_illegal_slot_inst); 841#elif defined(CONFIG_SH_FPU) 842#ifdef CONFIG_CPU_SUBTYPE_SHX3 843 set_exception_table_evt(0xd80, fpu_state_restore_trap_handler); 844 set_exception_table_evt(0xda0, fpu_state_restore_trap_handler); 845#else 846 set_exception_table_evt(0x800, fpu_state_restore_trap_handler); 847 set_exception_table_evt(0x820, fpu_state_restore_trap_handler); 848#endif 849#endif 850 851#ifdef CONFIG_CPU_SH2 852 set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler); 853#endif 854#ifdef CONFIG_CPU_SH2A 855 set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error); 856 set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error); 857#ifdef CONFIG_SH_FPU 858 set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler); 859#endif 860#endif 861 862 /* Setup VBR for boot cpu */ 863 per_cpu_trap_init(); 864} 865 866void show_trace(struct task_struct *tsk, unsigned long *sp, 867 struct pt_regs *regs) 868{ 869 unsigned long addr; 870 871 if (regs && user_mode(regs)) 872 return; 873 874 printk("\nCall trace: "); 875#ifdef CONFIG_KALLSYMS 876 
printk("\n"); 877#endif 878 879 while (!kstack_end(sp)) { 880 addr = *sp++; 881 if (kernel_text_address(addr)) 882 print_ip_sym(addr); 883 } 884 885 printk("\n"); 886 887 if (!tsk) 888 tsk = current; 889 890 debug_show_held_locks(tsk); 891} 892 893void show_stack(struct task_struct *tsk, unsigned long *sp) 894{ 895 unsigned long stack; 896 897 if (!tsk) 898 tsk = current; 899 if (tsk == current) 900 sp = (unsigned long *)current_stack_pointer; 901 else 902 sp = (unsigned long *)tsk->thread.sp; 903 904 stack = (unsigned long)sp; 905 dump_mem("Stack: ", stack, THREAD_SIZE + 906 (unsigned long)task_stack_page(tsk)); 907 show_trace(tsk, sp, NULL); 908} 909 910void dump_stack(void) 911{ 912 show_stack(NULL, NULL); 913} 914EXPORT_SYMBOL(dump_stack); 915