kprobes.c revision 99219a3fbc2dcf2eaa954f7b2ac27299fd7894cd
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip) {
		return NULL;
	}

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						&kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 * 	- under the kprobe_mutex - during kprobe_[un]register()
 * 				OR
 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
			&kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
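
/*
 * Resulting layout sketch (illustrative only, not a literal memory
 * layout): once a second probe is registered at the same address, the
 * hash bucket holds the manager ("aggregator") kprobe and the user
 * probes hang off its ->list:
 *
 *	kprobe_table[hash(addr)] --> ap (aggr_pre_handler, ...)
 *	                              ap->list --> old_p <--> p
 */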

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
		&& addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and
	 * add it to the address.  That way the addr field can
	 * either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if ((!kernel_text_address((unsigned long) p->addr)) ||
		in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check whether we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/* We must allow modules to probe themselves and
		 * in this case avoid incrementing the module refcount,
		 * so as to allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
				(ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}
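
/*
 * Minimal usage sketch (illustrative only; the handler name, the
 * probed symbol "do_fork" and the printk text are hypothetical and
 * are not defined in this file):
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", kp->addr);
 *		return 0;	(0 = continue and single-step the probed insn)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);	(typically from module init)
 *	...
 *	unregister_kprobe(&my_kp);	(typically from module exit)
 */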

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
		(p->list.next == &old_p->list) &&
		(p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/* Call unregister_page_fault_notifier()
	 * if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
				ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};


int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}
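
/*
 * Minimal jprobe sketch (illustrative only; "do_fork", the mirrored
 * prototype and the JPROBE_ENTRY() wrapper are assumptions drawn from
 * the kprobes documentation of this era, not defined in this file):
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "do_fork: clone_flags=0x%lx\n", clone_flags);
 *		jprobe_return();	(mandatory: hands control back to the real do_fork)
 *		return 0;		(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry	= JPROBE_ENTRY(jdo_fork),
 *		.kp	= { .symbol_name = "do_fork" },
 *	};
 *
 *	register_jprobe(&my_jp);
 */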

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe.  When
 * the probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
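
/*
 * Minimal kretprobe sketch (illustrative only; the handler name and the
 * probed symbol are hypothetical; ->handler is the return-time callback
 * invoked from the arch trampoline code):
 *
 *	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe at %p returned\n", ri->rp->kp.addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,	(upper bound on concurrent activations)
 *		.kp		= { .symbol_name = "do_fork" },
 *	};
 *
 *	register_kretprobe(&my_rp);
 */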

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);