kprobes.c revision 64f562c6df3cfc5d1b2b4bdbcb7951457df9c237
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

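/* Registered probes, hashed by probed address into KPROBE_TABLE_SIZE buckets */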
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];

unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
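/*
 * Set while a user handler runs from an aggregate probe, so that
 * aggr_fault_handler can route faults to that probe's fault_handler.
 */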
static struct kprobe *curr_kprobe;

/* Locks kprobe: irqs must be disabled */
void lock_kprobes(void)
{
	spin_lock(&kprobe_lock);
	kprobe_cpu = smp_processor_id();
}

void unlock_kprobes(void)
{
	kprobe_cpu = NR_CPUS;
	spin_unlock(&kprobe_lock);
}

/* You have to be holding the kprobe_lock */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each(node, head) {
		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->pre_handler) {
			curr_kprobe = kp;
			kp->pre_handler(kp, regs);
			curr_kprobe = NULL;
		}
	}
	return 0;
}

void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->post_handler) {
			curr_kprobe = kp;
			kp->post_handler(kp, regs, flags);
			curr_kprobe = NULL;
		}
	}
	return;
}

int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (curr_kprobe && curr_kprobe->fault_handler) {
		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
			return 1;
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	ap->addr = p->addr;
	ap->opcode = p->opcode;
	memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));

	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add(&p->list, &ap->list);

	INIT_HLIST_NODE(&ap->hlist);
	hlist_del(&p->hlist);
	hlist_add_head(&ap->hlist,
		&kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spinlock
 */
static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->break_handler || p->break_handler) {
		ret = -EEXIST;	/* kprobe and jprobe can't (yet) coexist */
	} else if (old_p->pre_handler == aggr_pre_handler) {
		list_add(&p->list, &old_p->list);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		list_add(&p->list, &ap->list);
	}
	return ret;
}

/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	*p->addr = p->opcode;
	hlist_del(&p->hlist);
	flush_icache_range((unsigned long) p->addr,
		   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
		struct kprobe *p, unsigned long flags)
{
	list_del(&p->list);
	if (list_empty(&old_p->list)) {
		cleanup_kprobe(old_p, flags);
		kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

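/*
 * Arm a new probe: save the original opcode, plant the breakpoint
 * instruction at the probed address and flush the icache.  If a probe
 * is already registered at that address, chain this one onto it via
 * register_aggr_kprobe() instead.
 */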
int register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;

	if ((ret = arch_prepare_kprobe(p)) != 0) {
		goto rm_kprobe;
	}
	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	p->opcode = *p->addr;
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}

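/*
 * Disarm and remove a probe.  If the probe is part of an aggregate,
 * only that probe is unlinked; the breakpoint stays in place until the
 * last probe at the address is gone.
 */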
void unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

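/*
 * Jumper probes: a jprobe piggybacks on a kprobe, using
 * setjmp_pre_handler to enter the user's handler (which sees the
 * probed function's arguments) and longjmp_break_handler to return.
 */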
int register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&kprobe_table[i]);

	err = register_die_notifier(&kprobe_exceptions_nb);
	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
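
/*
 * Illustrative sketch only (not part of this file): how a module might
 * use the interface exported above.  Handler signatures follow the
 * prototypes used by the aggregate handlers in this file; the handler
 * names and the probed address are hypothetical placeholders -- the
 * caller must supply a real kernel text address for kp.addr.
 */
#if 0
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe pre-handler hit at %p\n", p->addr);
	return 0;	/* 0: let kprobes single-step the original insn */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	printk(KERN_INFO "kprobe post-handler at %p\n", p->addr);
}

static struct kprobe example_kp = {
	.pre_handler = example_pre,
	.post_handler = example_post,
};

static int __init example_init(void)
{
	/* hypothetical: resolve the address of the function to probe */
	example_kp.addr = (kprobe_opcode_t *) addr_of_probed_function;
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif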