kprobes.c revision 74a0b5762713a26496db72eac34fbbed46f20fce
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

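/*
 * check_safety() verifies that it is safe to reclaim dirty instruction
 * slots.  On CONFIG_PREEMPT && CONFIG_PM kernels it freezes processes
 * and fails if any task is still runnable, since such a task might be
 * preempted in the middle of a slot; otherwise a synchronize_sched()
 * suffices, because a non-preemptible kernel cannot be stopped mid-slot.
 * Returns 0 when reclaiming is safe.
 */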
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

/*
 * Reclaim one slot.  Return 1 if the slot's page became completely
 * unused (and may have been freed or recycled), otherwise 0.
 */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

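/*
 * Walk every instruction page and reclaim each slot marked SLOT_DIRTY.
 * The inner loop stops as soon as collect_one_slot() reports that the
 * page became empty, since the page may have been freed at that point.
 */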
static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

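/*
 * Give an instruction slot back.  A slot that might still be executing
 * on another CPU is only marked SLOT_DIRTY here; it is reclaimed later
 * by collect_garbage_slots() once enough garbage has accumulated and
 * check_safety() says it is safe.
 */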
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

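/*
 * A breakpoint hit while a probe handler is running (as happens when a
 * jprobe returns through longjmp_break_handler) is dispatched to the
 * probe recorded in kprobe_instance by set_kprobe_instance().
 */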
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

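/* Free every kretprobe_instance left on rp's free list. */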
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

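/*
 * Refuse to plant probes into kprobes' own code: anything placed in the
 * .kprobes.text section must stay unprobed, or handling the breakpoint
 * would recurse.
 */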
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

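/*
 * Common back end for all probe registration: resolve and validate the
 * probed address, pin the probed module (unless a module is probing
 * itself), then either chain onto an existing probe at the same address
 * or insert a fresh one and arm it.
 */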
static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and add the
	 * offset to the address.  That way the addr field can either
	 * be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self-probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}

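/*
 * Illustrative sketch (not part of the original file): how a module
 * might use register_kprobe()/unregister_kprobe().  The handler and
 * module-function names and the probed symbol are hypothetical.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe hit at %p\n", p->addr);
 *		return 0;	(0 = continue with single-stepping)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	static int __init my_probe_init(void)
 *	{
 *		return register_kprobe(&my_kp);
 *	}
 *
 *	static void __exit my_probe_exit(void)
 *	{
 *		unregister_kprobe(&my_kp);
 *	}
 *
 *	module_init(my_probe_init);
 *	module_exit(my_probe_exit);
 *	MODULE_LICENSE("GPL");
 */
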
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * This is the only probe on the hash list. Disarm it only
		 * if kprobes are enabled - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	unsigned long addr = arch_deref_entry_point(jp->entry);

	if (!kernel_text_address(addr))
		return -EINVAL;

	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider only swapping the RA after the last pre_handler fires */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

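/*
 * Instances are pre-allocated here because pre_handler_kretprobe() runs
 * in exception context with a spinlock held, where sleeping allocations
 * are not allowed.  If the caller did not set rp->maxactive, size it
 * from NR_CPUS, with extra headroom on preemptible kernels where
 * handlers are more likely to overlap.
 */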
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = __register_kprobe(&rp->kp,
			(unsigned long)__builtin_return_address(0));
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

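/*
 * Remove the entry probe first so no new instances can be taken, then
 * detach the instances still waiting for their function to return (they
 * are freed once recycled) and free the unused ones outright.
 */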
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

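/*
 * The seq_file iterator position is an index into kprobe_table rather
 * than a probe; show_kprobe_addr() prints every probe on the hash chain
 * for each position.
 */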
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

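/* Arm every registered probe; backs the debugfs "enabled" switch. */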
static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
#endif