/*
 * arch/xtensa/kernel/traps.c
 *
 * Exception handling.
 *
 * Derived from code with the following copyrights:
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Modified for R3000 by Paul M. Antoine, 1995, 1996
 * Complete output from die() by Ulf Carlsson, 1998
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Essentially rewritten for the Xtensa architecture port.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 *
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>

#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/traps.h>

#ifdef CONFIG_KGDB
extern int gdb_enter;
extern int return_from_debug_flag;
#endif

/*
 * Machine specific interrupt handlers
 */

extern void kernel_exception(void);
extern void user_exception(void);

extern void fast_syscall_kernel(void);
extern void fast_syscall_user(void);
extern void fast_alloca(void);
extern void fast_unaligned(void);
extern void fast_second_level_miss(void);
extern void fast_store_prohibited(void);
extern void fast_coprocessor(void);

extern void do_illegal_instruction (struct pt_regs*);
extern void do_interrupt (struct pt_regs*);
extern void do_unaligned_user (struct pt_regs*);
extern void do_multihit (struct pt_regs*, unsigned long);
extern void do_page_fault (struct pt_regs*, unsigned long);
extern void do_debug (struct pt_regs*);
extern void system_call (struct pt_regs*);

/*
 * The vector table must be preceded by a save area (which
 * implies it must be in RAM, unless one places RAM immediately
 * before a ROM and puts the vector at the start of the ROM (!))
 */

#define KRNL		0x01
#define USER		0x02

#define COPROCESSOR(x)							\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }

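/*
 * Table entries: 'cause' is the EXCCAUSE code, 'fast' selects which
 * dispatch table(s) receive the handler (0 = default C-handler table,
 * USER and/or KRNL = the corresponding fast-handler table), and
 * 'handler' is the routine installed by trap_init() below.
 */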
typedef struct {
	int cause;
	int fast;
	void* handler;
} dispatch_init_table_t;

static dispatch_init_table_t __initdata dispatch_init_table[] = {

{ EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL,		KRNL,	   fast_syscall_kernel },
{ EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* EXCCAUSE_LOAD_STORE_ERROR unhandled */
{ EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
{ EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
{ EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS,		0,	   do_page_fault },
{ EXCCAUSE_ITLB_MISS,		USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_ITLB_MULTIHIT,	0,	   do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
/* EXCCAUSE_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT,	0,	   do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
#endif /* CONFIG_MMU */
/* EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
{ EXCCAUSE_MAPPED_DEBUG,	0,		do_debug },
{ -1, -1, 0 }

};

/* The exception table <exc_table> serves two functions:
 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
 * 2. it is a temporary memory buffer for the exception handlers.
 */

DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);

void die(const char*, struct pt_regs*, long);

static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

/*
 * Unhandled Exceptions. Kill user task or panic if in kernel space.
 */

void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
	__die_if_kernel("Caught unhandled exception - should not happen",
			regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	printk("Caught unhandled exception in '%s' "
	       "(pid = %d, pc = %#010lx) - should not happen\n"
	       "\tEXCCAUSE is %ld\n",
	       current->comm, task_pid_nr(current), regs->pc, exccause);
	force_sig(SIGILL, current);
}

/*
 * Multi-hit exception. This is fatal!
 */

void do_multihit(struct pt_regs *regs, unsigned long exccause)
{
	die("Caught multihit exception", regs, SIGKILL);
}

/*
 * IRQ handler.
 */

extern void do_IRQ(int, struct pt_regs *);

void do_interrupt(struct pt_regs *regs)
{
	static const unsigned int_level_mask[] = {
		0,
		XCHAL_INTLEVEL1_MASK,
		XCHAL_INTLEVEL2_MASK,
		XCHAL_INTLEVEL3_MASK,
		XCHAL_INTLEVEL4_MASK,
		XCHAL_INTLEVEL5_MASK,
		XCHAL_INTLEVEL6_MASK,
		XCHAL_INTLEVEL7_MASK,
	};
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

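	/*
	 * Repeatedly service the highest-priority interrupt that is both
	 * pending (INTERRUPT) and enabled (INTENABLE), starting at LOCKLEVEL
	 * and working down; stop once no such interrupt remains.
	 */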
	for (;;) {
		unsigned intread = get_sr(interrupt);
		unsigned intenable = get_sr(intenable);
		unsigned int_at_level = intread & intenable;
		unsigned level;

		for (level = LOCKLEVEL; level > 0; --level) {
			if (int_at_level & int_level_mask[level]) {
				int_at_level &= int_level_mask[level];
				break;
			}
		}

		if (level == 0)
			break;

		do_IRQ(__ffs(int_at_level), regs);
	}

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Illegal instruction. Fatal if in kernel space.
 */

void
do_illegal_instruction(struct pt_regs *regs)
{
	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
	    current->comm, task_pid_nr(current), regs->pc);
	force_sig(SIGILL, current);
}


/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_XTENSA_UNALIGNED_USER is not set, we don't allow unaligned
 * memory accesses from user space.
 */

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
void
do_unaligned_user (struct pt_regs *regs)
{
	siginfo_t info;

	__die_if_kernel("Unhandled unaligned exception in kernel",
			regs, SIGKILL);

	current->thread.bad_vaddr = regs->excvaddr;
	current->thread.error_code = -3;
	printk("Unaligned memory access to %08lx in '%s' "
	       "(pid = %d, pc = %#010lx)\n",
	       regs->excvaddr, current->comm, task_pid_nr(current), regs->pc);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void *) regs->excvaddr;
	force_sig_info(SIGBUS, &info, current);

}
#endif

void
do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_KGDB
	/* If remote debugging is configured AND enabled, we give control to
	 * kgdb.  Otherwise, we fall through, perhaps giving control to the
	 * native debugger.
	 */

	if (gdb_enter) {
		extern void gdb_handle_exception(struct pt_regs *);
		gdb_handle_exception(regs);
		return_from_debug_flag = 1;
		return;
	}
#endif

	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP, current);
}


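/*
 * Install <handler> at index <idx> of the exception table on every
 * possible CPU.
 */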
static void set_handler(int idx, void *handler)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
}

/* Set exception C handler - for temporary use when probing exceptions */

void * __init trap_set_handler(int cause, void *handler)
{
	void *previous = (void *)per_cpu(exc_table, 0)[
		EXC_TABLE_DEFAULT / 4 + cause];
	set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
	return previous;
}


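/*
 * Point EXCSAVE_1 at this CPU's exception table so that the low-level
 * exception vectors (see vectors.S) can locate the dispatch tables.
 */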
static void trap_init_excsave(void)
{
	unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
	__asm__ __volatile__("wsr  %0, excsave1\n" : : "a" (excsave1));
}

/*
 * Initialize dispatch tables.
 *
 * The exception vectors are stored compressed in the __init section in the
 * dispatch_init_table. This function initializes the following three tables
 * from that compressed table:
 * - fast user		first dispatch table for user exceptions
 * - fast kernel	first dispatch table for kernel exceptions
 * - default C-handler	C-handler called by the default fast handler.
 *
 * See vectors.S for more details.
 */

void __init trap_init(void)
{
	int i;

	/* Setup default vectors. */

	for (i = 0; i < 64; i++) {
		set_handler(EXC_TABLE_FAST_USER/4   + i, user_exception);
		set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
		set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
	}

	/* Setup specific handlers. */

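	/*
	 * Walk dispatch_init_table: entries with fast == 0 become the default
	 * C-handler for their cause; entries flagged USER and/or KRNL go into
	 * the corresponding fast dispatch table(s).
	 */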
	for (i = 0; dispatch_init_table[i].cause >= 0; i++) {

		int fast = dispatch_init_table[i].fast;
		int cause = dispatch_init_table[i].cause;
		void *handler = dispatch_init_table[i].handler;

		if (fast == 0)
			set_handler(EXC_TABLE_DEFAULT/4 + cause, handler);
		if (fast && fast & USER)
			set_handler(EXC_TABLE_FAST_USER/4 + cause, handler);
		if (fast && fast & KRNL)
			set_handler(EXC_TABLE_FAST_KERNEL/4 + cause, handler);
	}

	/* Initialize EXCSAVE_1 to hold the address of the exception table. */
	trap_init_excsave();
}

#ifdef CONFIG_SMP
void secondary_trap_init(void)
{
	trap_init_excsave();
}
#endif

/*
 * This function dumps the current valid window frame and other base registers.
 */

void show_regs(struct pt_regs * regs)
{
	int i, wmask;

	show_regs_print_info(KERN_DEFAULT);

	wmask = regs->wmask & ~1;

	for (i = 0; i < 16; i++) {
		if ((i % 8) == 0)
			printk(KERN_INFO "a%02d:", i);
		printk(KERN_CONT " %08lx", regs->areg[i]);
	}
	printk(KERN_CONT "\n");

	printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
	       regs->pc, regs->ps, regs->depc, regs->excvaddr);
	printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
	       regs->lbeg, regs->lend, regs->lcount, regs->sar);
	if (user_mode(regs))
		printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
		       regs->windowbase, regs->windowstart, regs->wmask,
		       regs->syscall);
}

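/*
 * walk_stackframe() callback: print each return address that lies in
 * kernel text, with its symbol name.
 */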
static int show_trace_cb(struct stackframe *frame, void *data)
{
	if (kernel_text_address(frame->pc)) {
		printk(" [<%08lx>] ", frame->pc);
		print_symbol("%s\n", frame->pc);
	}
	return 0;
}

void show_trace(struct task_struct *task, unsigned long *sp)
{
	if (!sp)
		sp = stack_pointer(task);

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	walk_stackframe(sp, show_trace_cb, NULL);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */

static int kstack_depth_to_print = 24;

void show_stack(struct task_struct *task, unsigned long *sp)
{
	int i = 0;
	unsigned long *stack;

	if (!sp)
		sp = stack_pointer(task);
	stack = sp;

	printk("\nStack: ");

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(sp))
			break;
		if (i && ((i % 8) == 0))
			printk("\n       ");
		printk("%08lx ", *sp++);
	}
	printk("\n");
	show_trace(task, stack);
}

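/*
 * Dump the instruction words around <pc>; the word at <pc> itself is
 * bracketed with '<' and '>'.
 */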
void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for (i = -3; i < 6; i++) {
		unsigned long insn;
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in pc)\n");
			break;
		}
		printk("%c%08lx%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
}

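/* Serializes oops output from die() below. */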
DEFINE_SPINLOCK(die_lock);

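/*
 * Print an oops (registers, plus stack and backtrace for kernel-mode
 * faults), taint the kernel, and terminate the offending task; panic
 * instead if we are in interrupt context or panic_on_oops is set.
 */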
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;
	int nl = 0;

	console_verbose();
	spin_lock_irq(&die_lock);

	printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
	if (nl)
		printk("\n");
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long*)regs->areg[1]);

	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(err);
}