kprobes.c revision 4a2bb6fcc80e6330ca2f2393e98605052cc7780b
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
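
/*
 * A usage sketch (symbol name hypothetical): the macro assigns through
 * its second argument, so callers pass an lvalue and check it afterwards,
 * just as kprobe_addr() and init_kprobes() below do:
 *
 *	kprobe_opcode_t *addr;
 *
 *	kprobe_lookup_name("vfs_read", addr);
 *	if (!addr)
 *		return -ENOENT;	(symbol not found)
 */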

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
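
/*
 * For example, assuming a 4096-byte PAGE_SIZE, a 16-byte MAX_INSN_SIZE and
 * a one-byte kprobe_opcode_t (roughly the x86 case), this works out to
 * 4096 / (16 * 1) = 256 instruction slots per page.
 */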

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;
	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}
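
/*
 * A sketch of how architecture code is expected to pair get_insn_slot()
 * with free_insn_slot() (simplified and hypothetical; real
 * arch_prepare_kprobe()/arch_remove_kprobe() implementations live in
 * arch/.../kernel/kprobes.c and do more work):
 *
 *	int arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 *
 *	void arch_remove_kprobe(struct kprobe *p)
 *	{
 *		if (p->ainsn.insn) {
 *			free_insn_slot(p->ainsn.insn, 0);
 *			p->ainsn.insn = NULL;
 *		}
 *	}
 */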

/* Return 1 if the slot's page was freed or recycled, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted on the garbage slots */
	if (check_safety())
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	mutex_lock(&kprobe_insn_mutex);
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
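
/*
 * A sketch of the second calling pattern described above, as used from
 * arch exception handlers (illustrative only):
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)addr);
 *	if (p)
 *		(a registered probe was hit at addr)
 *	preempt_enable();
 */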

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
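
/*
 * A sketch of the lookup pattern an arch kretprobe trampoline handler
 * uses with the helpers above (simplified; the real loop in
 * arch/.../kernel/kprobes.c also orders instances and fixes up the
 * return address):
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 *		if (ri->task != current)
 *			continue;
 *		if (ri->rp && ri->rp->handler)
 *			ri->rp->handler(ri, regs);
 *		recycle_rp_inst(ri, &empty_rp);
 *	}
 *	kretprobe_hash_unlock(current, &flags);
 */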

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() adds to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location that had a probe in the module vaddr area
		 * which has already been freed. So, the instruction slot
		 * has already been released. We need a new slot for the
		 * new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe. It will be used next
			 * time, or freed by unregister_kprobe.
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe, and return 1 on success. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
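
/*
 * For example (sketch; symbol and offset hypothetical), a caller supplies
 * either an address or a name plus offset, but not both:
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "vfs_read",
 *		.offset		= 0x10,
 *	};
 *
 * register_kprobe(&kp) then resolves kp.addr to vfs_read + 0x10.
 */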

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!__kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has already freed .init.text, we can't
		 * insert kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
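
/*
 * A minimal, hypothetical example of the register_kprobe() API (the probed
 * symbol "do_fork" and all names here are illustrative; see
 * Documentation/kprobes.txt and samples/kprobes/ for complete examples):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	(0 means: single-step the probed insn as usual)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_kprobe(&my_kp);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_kprobe(&my_kp);
 *	}
 */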

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
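
/*
 * A minimal, hypothetical jprobe usage sketch: the handler mirrors the
 * probed function's signature and must end with jprobe_return() (do_fork()
 * signature as of this era; compare samples/kprobes/jprobe_example.c):
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry			= jdo_fork,
 *		.kp = {
 *			.symbol_name	= "do_fork",
 *		},
 *	};
 *
 * register_jprobe(&my_jprobe) arms it; unregister_jprobe(&my_jprobe)
 * removes it.
 */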

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
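
/*
 * A minimal, hypothetical kretprobe usage sketch: report the return value
 * of the probed function (compare samples/kprobes/kretprobe_example.c):
 *
 *	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "%s returned %lu\n", ri->rp->kp.symbol_name,
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler		= ret_handler,
 *		.maxactive		= 20,
 *		.kp = {
 *			.symbol_name	= "do_fork",
 *		},
 *	};
 *
 * register_kretprobe(&my_kretprobe) arms it; unregister_kretprobe()
 * removes it, and my_kretprobe.nmissed counts hits that found no free
 * instance.
 */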

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can safely remove the insn_slot, because no thread
	 * calls the original probed function (which will be freed soon)
	 * any more.
	 */
	arch_remove_kprobe(p);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * kill the kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}
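
/*
 * With the format above, lines in the debugfs "list" file look roughly
 * like this (addresses illustrative; see Documentation/kprobes.txt):
 *
 *	c015d71a  k  vfs_write+0x0
 *	c011a316  j  do_fork+0x0
 *	c03dedc5  r  tcp_v4_rcv+0x0
 */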

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
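
/*
 * Typical use of the pair above (sketch): temporarily mute a registered
 * probe without unregistering it, then let it fire again.
 *
 *	disable_kprobe(&my_kp);		(probe stays registered, won't fire)
 *	enable_kprobe(&my_kp);		(probe fires again)
 */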

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
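
/*
 * From user space the knob below behaves like a bool file (assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	(rearm them)
 */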

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);