/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

#include "common.h"

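/*
 * Recover the original instruction bytes hidden by an optimized probe.
 * If @addr falls inside the relative jump written by a jump-optimized
 * kprobe, rebuild the original bytes in @buf from kp->opcode and
 * op->optinsn.copied_insn and return @buf; otherwise return @addr
 * unchanged. This lets the instruction decoder see the original kernel
 * text rather than the live, jump-patched bytes.
 */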
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is still being optimized */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is jump-optimized, the original bytes following
	 * kp->addr have been overwritten by the jump destination address.
	 * In that case they must be recovered from kp->opcode and the
	 * op->optinsn.copied_insn buffer.
	 */
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (addr == (unsigned long)kp->addr) {
		/* addr is the probe point itself: first byte + copied tail */
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		/* addr is inside the displacement: copy the remaining bytes */
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	/* movabs $val, %rdi (REX.W prefix + opcode 0xbf) */
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	/* mov $val, %eax */
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}
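
/*
 * For example (an arbitrary illustrative value), on x86-64
 * synthesize_set_arg1(addr, 0xffffffff81234567UL) emits the 10 bytes:
 *
 *	48 bf 67 45 23 81 ff ff ff ff	movabs $0xffffffff81234567, %rdi
 */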

asm (
			".global optprobe_template_entry\n"
			"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rsi\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			ASM_NOP5
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			ASM_NOP5
			/* Move flags into the saved-rsp slot */
			"	movq 144(%rsp), %rdx\n"
			"	movq %rdx, 152(%rsp)\n"
			RESTORE_REGS_STRING
			/* Skip flags entry */
			"	addq $8, %rsp\n"
			"	popfq\n"
#else /* CONFIG_X86_32 */
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %edx\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			ASM_NOP5
			RESTORE_REGS_STRING
			"	addl $4, %esp\n"	/* skip cs */
			"	popf\n"
#endif
			".global optprobe_template_end\n"
			"optprobe_template_end:\n");
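
/*
 * The template above is copied to the head of each detour buffer. It
 * builds a pt_regs frame on the stack, loads the optimized_kprobe
 * pointer into the first argument register at optprobe_template_val and
 * calls optimized_callback() at optprobe_template_call; both sites start
 * out as NOP padding and are patched by arch_prepare_optimized_kprobe().
 * The TMPL_*_IDX macros below are the byte offsets of these patch sites
 * within the template.
 */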

#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)

/* Optimized kprobe callback function: called from the detour buffer */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is queued for delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Fill in the pt_regs fields the template skipped saving */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		/* Emulate the state just after an int3 kprobe has trapped */
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

/* Copy whole instructions until at least RELATIVEJUMP_SIZE bytes are covered. */
static int copy_optimized_instructions(u8 *dest, u8 *src)
{
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len);
		if (!ret || !can_boost(dest + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved by another subsystem */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is an indirect jump */
static int insn_is_indirect_jump(struct insn *insn)
{
	/* ModRM reg field 4 (jmp near) or 5 (jmp far): (reg & 6) == 4 */
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
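
/*
 * For example, a two-byte short jump "eb 10" (jmp +0x10) decoded at
 * address 0x1000 has insn->next_byte == 0x1002 and
 * insn->immediate.value == 0x10, giving target == 0x1012.
 */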

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup the symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling.
	 */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr <  (unsigned long)__entry_text_end))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code may jump into this function,
			 * we can't optimize kprobes in this function.
			 */
			return 0;
		kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
		insn_get_length(&insn);
		/* Another subsystem has put a breakpoint here */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		/* Recover the address of the original instruction */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that this instruction doesn't jump into the target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	/* Refuse if another enabled kprobe sits inside the copied region */
	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

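/*
 * Layout of the detour buffer built below (a sketch):
 *
 *   buf + 0 .. TMPL_END_IDX:	copy of the template above
 *   buf + TMPL_END_IDX:	relocated original instructions
 *				(op->optinsn.size bytes)
 *   tail:			jump back to op->kp.addr + op->optinsn.size
 */
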
/*
 * Copy the instructions that will be replaced by the jump, into the
 * out-of-line buffer. The target instructions MUST be relocatable
 * (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify that the address gap fits in the +/-2GB range, because
	 * the detour buffer is reached by a 32-bit relative jump (same
	 * displacement as computed in arch_optimize_kprobes()).
	 */
	rel = (long)op->optinsn.insn - ((long)op->kp.addr + RELATIVEJUMP_SIZE);
	if (abs(rel) > 0x7fffffff) {
		__arch_remove_optimized_kprobe(op, 0);
		return -ERANGE;
	}

	buf = (u8 *)op->optinsn.insn;

	/* Copy instructions into the out-of-line buffer, after the template */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy the arch-dependent template into the buffer head */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information (the optimized_kprobe pointer) */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set the probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set a returning jmp instruction at the tail of the out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	flush_icache_range((unsigned long) buf,
			   (unsigned long) buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}

/*
 * Replace breakpoints (int3) with relative jumps.
 * Caller must hold kprobe_mutex and text_mutex.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buf[RELATIVEJUMP_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
		       RELATIVE_ADDR_SIZE);

		insn_buf[0] = RELATIVEJUMP_OPCODE;
		*(s32 *)(&insn_buf[1]) = rel;

		/*
		 * text_poke_bp() patches the site live via a transient int3;
		 * a CPU hitting that int3 mid-patch is diverted to
		 * op->optinsn.insn.
		 */
		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
			     op->optinsn.insn);

		list_del_init(&op->list);
	}
}
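
/*
 * Probe site states (a sketch; cc is BREAKPOINT_INSTRUCTION, e9 is
 * RELATIVEJUMP_OPCODE):
 *
 *   unoptimized:	cc <original bytes 2..5> ...	(int3 kprobe)
 *   optimized:		e9 <rel32 to detour buffer>
 *
 * arch_optimize_kprobes() above switches a site to the optimized form;
 * arch_unoptimize_kprobe() below restores the int3 form.
 */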

/* Replace a relative jump with a breakpoint (int3). */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 insn_buf[RELATIVEJUMP_SIZE];

	/* Set int3 at the first byte for kprobes, restore the rest */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through the copied instructions, past the template */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);