/*
 * Copyright 2013 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE-Gx KGDB support.
 */

#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/cacheflush.h>

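/*
 * State used while single-stepping: the bundle written over the step
 * target (a breakpoint bundle tagged with DIE_SSTEPBP so the trap is
 * reported as a single-step event), plus the address and original
 * contents of the bundle that was replaced.
 */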
static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
static unsigned long stepped_addr;
static tile_bundle_bits stepped_instr;

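/*
 * Map GDB register numbers to their size and location in struct pt_regs.
 * Registers that are not saved in pt_regs use an offset of -1 and always
 * read back as zero.
 */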
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
	{ "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
	{ "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
	{ "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
	{ "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
	{ "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
	{ "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
	{ "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
	{ "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
	{ "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
	{ "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
	{ "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
	{ "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
	{ "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
	{ "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
	{ "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
	{ "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
	{ "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
	{ "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
	{ "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
	{ "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
	{ "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
	{ "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
	{ "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
	{ "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
	{ "sn", GDB_SIZEOF_REG, -1},
	{ "idn0", GDB_SIZEOF_REG, -1},
	{ "idn1", GDB_SIZEOF_REG, -1},
	{ "udn0", GDB_SIZEOF_REG, -1},
	{ "udn1", GDB_SIZEOF_REG, -1},
	{ "udn2", GDB_SIZEOF_REG, -1},
	{ "udn3", GDB_SIZEOF_REG, -1},
	{ "zero", GDB_SIZEOF_REG, -1},
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
	{ "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
};

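/*
 * Copy the value of register regno into mem and return the register name,
 * or NULL if regno is out of range.  Registers that are not saved in
 * pt_regs are reported as zero.
 */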
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	else
		memset(mem, 0, dbg_reg_def[regno].size);
	return dbg_reg_def[regno].name;
}

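/*
 * Update register regno in pt_regs from the value in mem.  Writes to
 * registers that are not saved in pt_regs are silently ignored.
 */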
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	return 0;
}

/*
 * Similar to pt_regs_to_gdb_regs() except that the process is sleeping and
 * so we may not be able to get all the info.
 */
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
	int reg;
	struct pt_regs *thread_regs;
	unsigned long *ptr = gdb_regs;

	if (task == NULL)
		return;

	/* Initialize to zero. */
	memset(gdb_regs, 0, NUMREGBYTES);

	thread_regs = task_pt_regs(task);
	for (reg = 0; reg <= TREG_LAST_GPR; reg++)
		*(ptr++) = thread_regs->regs[reg];

	gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
	gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
}

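/* Set the PC at which execution will resume when kgdb continues. */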
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->pc = pc;
}

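/* IPI handler used to round up the other cpus: report this cpu into the
 * kgdb core so it is held while the debugger is active. */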
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), NULL);
}

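/*
 * Round up the other online cpus so they stop while the debugger is
 * active.  smp_call_function() may not be called with interrupts
 * disabled, so briefly enable them around the cross-call.
 */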
void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}

/*
 * Convert a kernel address to the writable kernel text mapping.
 */
static unsigned long writable_address(unsigned long addr)
{
	unsigned long ret = 0;

	if (core_kernel_text(addr))
		ret = addr - MEM_SV_START + PAGE_OFFSET;
	else if (is_module_text_address(addr))
		ret = addr;
	else
		pr_err("Unknown virtual address 0x%lx\n", addr);

	return ret;
}

/*
 * Calculate the new address for after a step.
 */
static unsigned long get_step_address(struct pt_regs *regs)
{
	int src_reg;
	int jump_off;
	int br_off;
	unsigned long addr;
	unsigned int opcode;
	tile_bundle_bits bundle;

	/* Move to the next instruction by default. */
	addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
	bundle = *(unsigned long *)instruction_pointer(regs);

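	/*
	 * Decode any control-flow instruction in the bundle: jr/jalr take
	 * the target from a source register, while j/jal and the
	 * conditional branches encode a signed, bundle-aligned offset
	 * relative to the current pc.
	 */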
	/* 0: X mode, Otherwise: Y mode. */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {
		if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
		    get_RRROpcodeExtension_Y1(bundle) ==
		    UNARY_RRR_1_OPCODE_Y1) {
			opcode = get_UnaryOpcodeExtension_Y1(bundle);

			switch (opcode) {
			case JALR_UNARY_OPCODE_Y1:
			case JALRP_UNARY_OPCODE_Y1:
			case JR_UNARY_OPCODE_Y1:
			case JRP_UNARY_OPCODE_Y1:
				src_reg = get_SrcA_Y1(bundle);
				dbg_get_reg(src_reg, &addr, regs);
				break;
			}
		}
	} else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
		if (get_RRROpcodeExtension_X1(bundle) ==
		    UNARY_RRR_0_OPCODE_X1) {
			opcode = get_UnaryOpcodeExtension_X1(bundle);

			switch (opcode) {
			case JALR_UNARY_OPCODE_X1:
			case JALRP_UNARY_OPCODE_X1:
			case JR_UNARY_OPCODE_X1:
			case JRP_UNARY_OPCODE_X1:
				src_reg = get_SrcA_X1(bundle);
				dbg_get_reg(src_reg, &addr, regs);
				break;
			}
		}
	} else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
		opcode = get_JumpOpcodeExtension_X1(bundle);

		switch (opcode) {
		case JAL_JUMP_OPCODE_X1:
		case J_JUMP_OPCODE_X1:
			jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
			addr = regs->pc +
				(jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
			break;
		}
	} else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
		long srca = 0;

		br_off = 0;
		opcode = get_BrType_X1(bundle);

		/* All conditional branches test the value of operand SrcA. */
		dbg_get_reg(get_SrcA_X1(bundle), &srca, regs);

		switch (opcode) {
		case BEQZT_BRANCH_OPCODE_X1:
		case BEQZ_BRANCH_OPCODE_X1:
			if (srca == 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BGEZT_BRANCH_OPCODE_X1:
		case BGEZ_BRANCH_OPCODE_X1:
			if (srca >= 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BGTZT_BRANCH_OPCODE_X1:
		case BGTZ_BRANCH_OPCODE_X1:
			if (srca > 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLBCT_BRANCH_OPCODE_X1:
		case BLBC_BRANCH_OPCODE_X1:
			if (!(srca & 1))
				br_off = get_BrOff_X1(bundle);
			break;
		case BLBST_BRANCH_OPCODE_X1:
		case BLBS_BRANCH_OPCODE_X1:
			if (srca & 1)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLEZT_BRANCH_OPCODE_X1:
		case BLEZ_BRANCH_OPCODE_X1:
			if (srca <= 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BLTZT_BRANCH_OPCODE_X1:
		case BLTZ_BRANCH_OPCODE_X1:
			if (srca < 0)
				br_off = get_BrOff_X1(bundle);
			break;
		case BNEZT_BRANCH_OPCODE_X1:
		case BNEZ_BRANCH_OPCODE_X1:
			if (srca != 0)
				br_off = get_BrOff_X1(bundle);
			break;
		}

		if (br_off != 0) {
			br_off = sign_extend(br_off, 17);
			addr = regs->pc +
				(br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
		}
	}

	return addr;
}

/*
 * Replace the next instruction after the current instruction with a
 * breakpoint instruction.
 */
static void do_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	/* Determine where the target instruction will send us to. */
	stepped_addr = get_step_address(regs);
	probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
			  BREAK_INSTR_SIZE);

	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
			   BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

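/* Restore the instruction that was replaced by the single-step breakpoint. */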
static void undo_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	if (stepped_instr == 0)
		return;

	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
			   BREAK_INSTR_SIZE);
	stepped_instr = 0;
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

/*
 * Die notifier callback: on a kernel breakpoint, single-step trap, or
 * fatal exception, try to fall into the debugger if KGDB is enabled.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	int ret;
	unsigned long flags;
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

#ifdef CONFIG_KPROBES
	/*
	 * Return immediately if the kprobes fault notifier has set
	 * DIE_PAGE_FAULT.
	 */
	if (cmd == DIE_PAGE_FAULT)
		return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */

	switch (cmd) {
	case DIE_BREAK:
	case DIE_COMPILED_BPT:
		break;
	case DIE_SSTEPBP:
		local_irq_save(flags);
		kgdb_handle_exception(0, SIGTRAP, 0, regs);
		local_irq_restore(flags);
		return NOTIFY_STOP;
	default:
		/* Userspace events, ignore. */
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	local_irq_save(flags);
	ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
	local_irq_restore(flags);
	if (ret)
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_notify,
};

/*
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcom_in_buffer: The buffer of the packet we have read.
 * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
 * @regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well as packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	char *ptr;
	unsigned long address;

	/* Undo any stepping we may have done. */
	undo_single_step(regs);

	switch (remcom_in_buffer[0]) {
	case 'c':
	case 's':
	case 'D':
	case 'k':
		/*
		 * Try to read an optional parameter; the pc is unchanged
		 * if there is none.  If this was a compiled-in breakpoint,
		 * we need to move to the next instruction or we will just
		 * hit the breakpoint over and over again.
		 */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &address))
			regs->pc = address;
		else if (*(unsigned long *)regs->pc == compiled_bpt)
			regs->pc += BREAK_INSTR_SIZE;

		if (remcom_in_buffer[0] == 's') {
			do_single_step(regs);
			kgdb_single_step = 1;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		} else
			atomic_set(&kgdb_cpu_doing_single_step, -1);

		return 0;
	}

	return -1; /* this means that we do not want to exit from the handler */
}

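/*
 * The architecture-specific kgdb operations.  gdb_bpt_instr is filled in
 * with the TILE-Gx breakpoint bundle by kgdb_arch_init().
 */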
struct kgdb_arch arch_kgdb_ops;

/*
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks.
 */
int kgdb_arch_init(void)
{
	tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;

	memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
	return register_die_notifier(&kgdb_notifier);
}

/*
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

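/*
 * Install a kgdb breakpoint: save the original bundle at bpt->bpt_addr and
 * overwrite it with the breakpoint bundle via the writable text mapping.
 */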
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned long addr_wr = writable_address(bpt->bpt_addr);

	if (addr_wr == 0)
		return -1;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;

	err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
				 BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return err;
}

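/* Remove a kgdb breakpoint by writing back the saved original bundle. */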
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned long addr_wr = writable_address(bpt->bpt_addr);

	if (addr_wr == 0)
		return -1;

	err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
				 BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return err;
}
