/*
 *  linux/arch/m32r/kernel/traps.c
 *
 *  Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
 *                            Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/processor.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>

#include <asm/smp.h>

#include <linux/module.h>

asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * Boot vector for the application processors (APs): the entry at
 * _AP_EI in the .eit_vector4 section branches to startup_AP.
 */
asm (
	"	.section .eit_vector4,\"ax\"	\n"
	"	.global _AP_RE			\n"
	"	.global startup_AP		\n"
	"_AP_RE:				\n"
	"	.fill 32, 4, 0			\n"
	"_AP_EI: bra	startup_AP		\n"
	"	.previous			\n"
);
#endif  /* CONFIG_SMP */

extern unsigned long	eit_vector[];
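/*
 * BRA_INSN(func, entry) builds a 32-bit M32R "bra" instruction that
 * branches from EIT vector entry 'entry' (at eit_vector + entry*4) to
 * 'func': 0xff000000 is the branch opcode and the low 24 bits hold the
 * displacement in 4-byte units, hence the division by 4 below.
 * (Descriptive note; see the M32R ISA manual for the exact encoding.)
 */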
#define BRA_INSN(func, entry)	\
	((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
	+ 0xff000000UL

static void set_eit_vector_entries(void)
{
	extern void default_eit_handler(void);
	extern void system_call(void);
	extern void pie_handler(void);
	extern void ace_handler(void);
	extern void tme_handler(void);
	extern void _flush_cache_copyback_all(void);

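	/*
	 * Each EIT vector entry is one 32-bit word, so the indices below
	 * are word offsets into eit_vector[].  Judging from the handlers
	 * installed: 8 is the reserved-instruction exception, 12 the
	 * address/alignment exception, 16..31 the TRAP0..TRAP15 software
	 * traps (18 = system call), 32 the external interrupt entry, 64
	 * the privileged-instruction exception and, with an MMU, 68/72
	 * the access and TLB-miss exceptions.  Entries 0 and 4 load a
	 * marker into r0 ("seth r0, ...") before branching to
	 * default_eit_handler.
	 */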
	eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
	eit_vector[1] = BRA_INSN(default_eit_handler, 1);
	eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
	eit_vector[5] = BRA_INSN(default_eit_handler, 5);
	eit_vector[8] = BRA_INSN(rie_handler, 8);
	eit_vector[12] = BRA_INSN(alignment_check, 12);
	eit_vector[16] = BRA_INSN(ill_trap, 16);
	eit_vector[17] = BRA_INSN(debug_trap, 17);
	eit_vector[18] = BRA_INSN(system_call, 18);
	eit_vector[19] = BRA_INSN(ill_trap, 19);
	eit_vector[20] = BRA_INSN(ill_trap, 20);
	eit_vector[21] = BRA_INSN(ill_trap, 21);
	eit_vector[22] = BRA_INSN(ill_trap, 22);
	eit_vector[23] = BRA_INSN(ill_trap, 23);
	eit_vector[24] = BRA_INSN(ill_trap, 24);
	eit_vector[25] = BRA_INSN(ill_trap, 25);
	eit_vector[26] = BRA_INSN(ill_trap, 26);
	eit_vector[27] = BRA_INSN(ill_trap, 27);
	eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
	eit_vector[29] = BRA_INSN(ill_trap, 29);
	eit_vector[30] = BRA_INSN(ill_trap, 30);
	eit_vector[31] = BRA_INSN(ill_trap, 31);
	eit_vector[32] = BRA_INSN(ei_handler, 32);
	eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
	eit_vector[68] = BRA_INSN(ace_handler, 68);
	eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
	eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
	eit_vector[189] = 0;	/* CPU_BOOT_IPI */
	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
	eit_vector[191] = 0;
#endif
	_flush_cache_copyback_all();
}

void __init trap_init(void)
{
	set_eit_vector_entries();

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}

static int kstack_depth_to_print = 24;

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	unsigned long addr;

	if (!stack)
		stack = (unsigned long *)&stack;

	printk("Call Trace: ");
	while (!kstack_end(stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr))
			printk("[<%08lx>] %pSR\n", addr, (void *)addr);
	}
	printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL);" prints the
	 * back trace for this cpu.
	 */

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 4) == 0))
			printk("\n       ");
		printk("%08lx ", *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_registers(struct pt_regs *regs)
{
	int i = 0;
	int in_kernel = 1;
	unsigned long sp;

	printk("CPU:    %d\n", smp_processor_id());
	show_regs(regs);

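	/*
	 * For a fault taken in kernel mode the relevant stack pointer is
	 * the word just above the saved pt_regs frame, i.e. (1 + regs);
	 * for user mode we report the saved user stack pointer (spu)
	 * instead.
	 */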
	sp = (unsigned long) (1+regs);
	if (user_mode(regs)) {
		in_kernel = 0;
		sp = regs->spu;
		printk("SPU: %08lx\n", sp);
	} else {
		printk("SPI: %08lx\n", sp);
	}
	printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
		current->comm, task_pid_nr(current), 0xffff & i,
		4096 + (unsigned long)current);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("\nStack: ");
		show_stack(current, (unsigned long *)sp);

		printk("\nCode: ");
		if (regs->bpc < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char *)regs->bpc)[i])) {
bad:
				printk(" Bad PC value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_registers(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char *str,
	struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

static __inline__ void do_trap(int trapnr, int signr, const char *str,
	struct pt_regs *regs, long error_code, siginfo_t *info)
{
	if (user_mode(regs)) {
		/* trap_signal */
		struct task_struct *tsk = current;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	} else {
		/* kernel_trap */
		if (!fixup_exception(regs))
			die(str, regs, error_code);
		return;
	}
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

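/*
 * Each use below expands to an asmlinkage entry point named do_<name>
 * (do_debug_trap, do_rie_handler, do_pie_handler, do_ill_trap); these
 * are the C handlers reached once entry.S has saved the register state.
 */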
DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL,  "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL,  "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL,  "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
	mm_segment_t oldfs;
	unsigned long insn;
	int tmp;

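	/*
	 * The instruction that faulted has to be fetched with
	 * copy_from_user(), so the address limit is temporarily switched:
	 * USER_DS when the fault came from user mode, KERNEL_DS when it
	 * came from the kernel, and restored to oldfs afterwards.
	 */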
	oldfs = get_fs();

	if (user_mode(regs)) {
		local_irq_enable();
		current->thread.error_code = error_code;
		current->thread.trap_no = 0x17;

		set_fs(USER_DS);
		if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
			set_fs(oldfs);
			goto uspace_segv;
		}
		tmp = handle_unaligned_access(insn, regs);
		set_fs(oldfs);

		if (!tmp)
			return;

	uspace_segv:
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
			"access\n", current->comm);
		force_sig(SIGSEGV, current);
	} else {
		set_fs(KERNEL_DS);
		if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}
		handle_unaligned_access(insn, regs);
		set_fs(oldfs);
	}
}