kprobes.c revision 3d7e33825d8799115dd2495c9944badd3272a623
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this lookup must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
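
/*
 * Illustrative sketch (not part of this file): how a caller would use
 * the macro above to resolve a symbol to a probeable address.  The
 * symbol name is an assumed example.
 *
 *	kprobe_opcode_t *addr;
 *
 *	kprobe_lookup_name("do_fork", addr);
 *	if (!addr)
 *		return -EINVAL;		(symbol not found)
 */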

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
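
/*
 * Illustrative sketch (not part of this file): how architecture code
 * typically uses the slot allocator when preparing a probe.  The
 * ainsn.insn field and the copy size are assumptions based on the
 * comment above; the details vary per architecture.
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr,
 *	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *
 * On teardown, the slot is handed back marked dirty, so it is freed
 * lazily by the garbage collector below:
 *
 *	free_insn_slot(p->ainsn.insn, 1);
 */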

/*
 * Return 1 if the page containing this slot is no longer in use
 * (all its garbage has been collected), otherwise 0.
 */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted while running on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
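
/*
 * Illustrative sketch (not part of this file): an arch breakpoint
 * handler looks up the probe for the trapping address roughly like
 * this, with preemption disabled as required above.  The surrounding
 * handler and the use of instruction_pointer() are assumptions for
 * the example.
 *
 *	struct kprobe *p;
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)instruction_pointer(regs));
 *	if (!p) {
 *		preempt_enable_no_resched();
 *		return 0;	(not our breakpoint)
 *	}
 */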

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
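
/*
 * Illustrative sketch (not part of this file): the aggregation above
 * is what makes it legal to register two independent kprobes at the
 * same address.  The symbol name is an assumed example.
 *
 *	static struct kprobe kp1 = { .symbol_name = "do_fork" };
 *	static struct kprobe kp2 = { .symbol_name = "do_fork" };
 *
 *	register_kprobe(&kp1);	(plants the breakpoint)
 *	register_kprobe(&kp2);	(finds kp1 already there, so both are
 *				 chained under one "manager" kprobe)
 */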

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and use the
	 * result as the base address; p->offset is added below.  That
	 * way the addr field can either be global or relative to a
	 * symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check whether we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self-probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled) {
		if (atomic_add_return(1, &kprobe_count) ==
				(ARCH_INACTIVE_KPROBE_COUNT + 1))
			register_page_fault_notifier(&kprobe_page_fault_nb);

		arch_arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}
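
/*
 * Illustrative sketch (not part of this file): minimal module code
 * using the API above.  The probed symbol is an assumed example; the
 * pre_handler signature matches the one invoked by aggr_pre_handler.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
 *		return 0;	(0 = let the probed insn execute)
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	(in the module init/exit paths:)
 *	ret = register_kprobe(&kp);
 *	unregister_kprobe(&kp);
 */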

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier() if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
				ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	unsigned long addr = arch_deref_entry_point(jp->entry);

	if (!kernel_text_address(addr))
		return -EINVAL;

	/* TODO: verify that the probe point is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
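
/*
 * Illustrative sketch (not part of this file): a jprobe supplies a
 * handler with the *same prototype* as the probed function, so it can
 * inspect the arguments, and it must exit via jprobe_return().  The
 * probed symbol, its assumed prototype, and the JPROBE_ENTRY() helper
 * are era-specific assumptions for this example.
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "do_fork: flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	(never returns normally)
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry	= JPROBE_ENTRY(jdo_fork),
 *		.kp	= { .symbol_name = "do_fork" },
 *	};
 *
 *	register_jprobe(&my_jprobe);
 *	unregister_jprobe(&my_jprobe);
 */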

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler fires */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
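
/*
 * Illustrative sketch (not part of this file): a minimal kretprobe
 * user.  The handler runs when the probed function returns; the
 * probed symbol is an assumed example, and reading the return value
 * out of regs is architecture-specific (e.g. regs->eax on i386).
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probed function returned\n");
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,	(bound on concurrent activations)
 *	};
 *
 *	my_kretprobe.kp.symbol_name = "do_fork";
 *	register_kretprobe(&my_kretprobe);
 *	...
 *	unregister_kretprobe(&my_kretprobe);
 */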

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	/*
	 * Re-register the page fault notifier only if there are any
	 * active probes at the time of enabling kprobes globally
	 */
	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
		register_page_fault_notifier(&kprobe_page_fault_nb);

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();

	mutex_lock(&kprobe_mutex);
	/* Unconditionally unregister the page_fault notifier */
	unregister_page_fault_notifier(&kprobe_page_fault_nb);

already_disabled:
	mutex_unlock(&kprobe_mutex);
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
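
/*
 * Illustrative usage (not part of this file): with debugfs mounted at
 * /sys/kernel/debug, all probes can be disarmed and re-armed from a
 * shell via the "enabled" file created below:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 *	echo 1 > /sys/kernel/debug/kprobes/enabled
 *
 * and the active probe list can be read from the "list" file:
 *
 *	cat /sys/kernel/debug/kprobes/list
 */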

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);