/*
 * linux/arch/unicore32/kernel/traps.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state.
 *  Mostly a debugging aid, but will probably kill the offending process.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/unistd.h>

#include <asm/cacheflush.h>
#include <asm/traps.h>

#include "setup.h"

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

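/*
 * Print one backtrace entry; with CONFIG_KALLSYMS the addresses are also
 * resolved to symbol names via the %pS printk format.
 */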
void dump_backtrace_entry(unsigned long where,
		unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
	printk(KERN_DEFAULT "[<%08lx>] (%pS) from [<%08lx>] (%pS)\n",
			where, (void *)where, from, (void *)from);
#else
	printk(KERN_DEFAULT "Function entered at [<%08lx>] from [<%08lx>]\n",
			where, from);
#endif
}

/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET ||
	    (sp > (unsigned long)high_memory && high_memory != NULL))
		return -EFAULT;

	return 0;
}

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk(KERN_DEFAULT "%s%s(0x%08lx to 0x%08lx)\n",
			lvl, str, bottom, top);

	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			if (p >= bottom && p < top) {
				unsigned long val;
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		printk(KERN_DEFAULT "%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}

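/*
 * Dump the instruction words around the faulting PC: the four words
 * preceding it and the word at the PC itself (shown in parentheses).
 */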
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int width = 8;
	mm_segment_t fs;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
					width, val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk(KERN_DEFAULT "%sCode: %s\n", lvl, str);

	set_fs(fs);
}

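/*
 * Walk and print the call chain, taking the frame pointer from @regs,
 * from the saved context of @tsk, or from the current frame if neither
 * is supplied.
 */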
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp, mode;
	int ok = 1;

	printk(KERN_DEFAULT "Backtrace: ");

	if (!tsk)
		tsk = current;

	if (regs) {
		fp = regs->UCreg_fp;
		mode = processor_mode(regs);
	} else if (tsk != current) {
		fp = thread_saved_fp(tsk);
		mode = 0x10;
	} else {
		asm("mov %0, fp" : "=r" (fp) : : "cc");
		mode = 0x10;
	}

	if (!fp) {
		printk("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		printk("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		printk("frame pointer underflow");
	printk("\n");

	if (ok)
		c_backtrace(fp, mode);
}

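/* Dump the backtrace of the current context. */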
void dump_stack(void)
{
	dump_backtrace(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

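/* Show the backtrace of @tsk; the @sp argument is unused here. */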
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();
}

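/*
 * Print the oops banner, registers, stack and code dump for a fatal
 * fault.  The notifier chain verdict is returned so that die() can
 * decide whether to kill the offending task.
 */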
static int __die(const char *str, int err, struct thread_info *thread,
		struct pt_regs *regs)
{
	struct task_struct *tsk = thread->task;
	static int die_counter;
	int ret;

	printk(KERN_EMERG "Internal error: %s: %x [#%d]\n",
	       str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on UniCore */
	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no,
			SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}

DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();
	int ret;

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, thread, regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}

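/*
 * Deliver the signal described by @info to the current task for faults
 * taken in user mode; faults in kernel mode are fatal and go through die().
 */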
void uc32_notify_die(const char *str, struct pt_regs *regs,
		struct siginfo *info, unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_info(info->si_signo, info, current);
	} else
		die(str, regs, err);
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, unsigned int reason)
{
	console_verbose();

	printk(KERN_CRIT "Bad mode detected with reason 0x%x\n", reason);

	die("Oops - bad mode", regs, 0);
	local_irq_disable();
	panic("bad mode");
}

void __pte_error(const char *file, int line, unsigned long val)
{
	printk(KERN_DEFAULT "%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
	printk(KERN_DEFAULT "%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
	printk(KERN_DEFAULT "%s:%d: bad pgd %08lx.\n", file, line, val);
}

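/* Report a division by zero detected in kernel code. */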
asmlinkage void __div0(void)
{
	printk(KERN_DEFAULT "Division by zero in kernel.\n");
	dump_stack();
}
EXPORT_SYMBOL(__div0);

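/* abort() must not return: BUG(), and panic if that somehow survives. */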
void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

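/* Nothing to do here; the vectors are installed in early_trap_init(). */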
void __init trap_init(void)
{
	return;
}

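/*
 * Install the exception vectors and stubs at VECTORS_BASE and make sure
 * the instruction cache sees them.
 */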
void __init early_trap_init(void)
{
	unsigned long vectors = VECTORS_BASE;

	/*
	 * Copy the vectors and stubs (in entry-unicore.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	memcpy((void *)vectors,
			__vectors_start,
			__vectors_end - __vectors_start);
	memcpy((void *)vectors + 0x200,
			__stubs_start,
			__stubs_end - __stubs_start);

	early_signal_init();

	flush_icache_range(vectors, vectors + PAGE_SIZE);
}