kprobes.c revision 4dae560f97fa438f373b53e14b30149c9e44a600
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
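
/*
 * kprobe_lookup_name() is used as a statement that assigns into its
 * second argument. A minimal, illustrative sketch of a call site (the
 * symbol name here is only an example):
 *
 *	kprobe_opcode_t *addr;
 *
 *	kprobe_lookup_name("do_fork", addr);
 *	if (!addr)
 *		return -EINVAL;
 */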

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Functions that we want to prohibit kprobes in are normally marked
 * __kprobes. But there are cases where such functions already belong
 * to a different section (e.g. __sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}
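
/*
 * Illustrative sketch (not part of this file): an architecture that
 * defines __ARCH_WANT_KPROBES_INSN_SLOT would typically grab a slot in
 * its arch_prepare_kprobe() and hand it back via free_insn_slot() from
 * arch_remove_kprobe(), roughly like:
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		return 0;
 *	}
 *
 * The real arch code does more (instruction decoding, fixups); see
 * arch/x86/kernel/kprobes.c for a complete implementation.
 */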

/* Return 1 if the slot's page became fully unused, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no task is preempted while stepping on a garbage slot */
	if (check_safety())
		return -EAGAIN;

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 * 	- under the kprobe_mutex - during kprobe_[un]register()
 * 				OR
 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/* Arm a kprobe with text_mutex held */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex held */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
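
/*
 * Illustrative sketch (details vary by architecture): an arch
 * return-trampoline handler walks the per-task bucket under
 * kretprobe_hash_lock() and recycles the instances belonging to the
 * current task, roughly like:
 *
 *	struct hlist_head *head, empty_rp;
 *	struct hlist_node *node, *tmp;
 *	struct kretprobe_instance *ri;
 *	unsigned long flags;
 *
 *	INIT_HLIST_HEAD(&empty_rp);
 *	kretprobe_hash_lock(current, &head, &flags);
 *	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 *		if (ri->task != current)
 *			continue;
 *		... invoke ri->rp->handler, fix up the return address ...
 *		recycle_rp_inst(ri, &empty_rp);
 *	}
 *	kretprobe_hash_unlock(current, &flags);
 *
 * See trampoline_handler() in arch/x86/kernel/kprobes.c for the real thing.
 */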

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() adds to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe in a module vaddr area that has
		 * already been freed. So, the instruction slot has
		 * already been released. We need a new slot for the
		 * new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe().
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe, and return 1 if it succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
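
/*
 * A caller therefore specifies a probe point either by address or by
 * symbol plus offset, but never both. An illustrative sketch (the
 * symbol name is only an example):
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.offset		= 0,
 *	};
 */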

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has freed .init.text, we can't insert
		 * kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
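
/*
 * Minimal usage sketch, in the spirit of samples/kprobes/kprobe_example.c
 * (the handler and symbol names are illustrative only):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = 0x%p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	ret = register_kprobe(&kp);	(check ret for errors in real code)
 */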

/* Check the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/*
 * Unregister a kprobe without scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);
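
/*
 * Batch registration is all-or-nothing: on the first failure, the probes
 * registered so far are rolled back. Illustrative sketch (kp1 and kp2
 * are hypothetical, previously initialized kprobes):
 *
 *	struct kprobe *kps[] = { &kp1, &kp2 };
 *
 *	ret = register_kprobes(kps, ARRAY_SIZE(kps));
 *	if (ret < 0)
 *		... nothing is left registered ...
 */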

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* TODO: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
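
/*
 * Jprobe usage sketch, in the spirit of samples/kprobes/jprobe_example.c
 * (the probed symbol and handler names are examples only). The handler
 * must share the probed function's prototype and must end by calling
 * jprobe_return():
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry			= jdo_fork,
 *		.kp.symbol_name		= "do_fork",
 *	};
 *
 *	register_jprobe(&my_jprobe);
 */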

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
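
/*
 * Kretprobe usage sketch, in the spirit of
 * samples/kprobes/kretprobe_example.c (symbol and handler names are
 * examples only). The handler runs when the probed function returns,
 * and regs_return_value() extracts its return value:
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "do_fork returned %ld\n",
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler		= ret_handler,
 *		.kp.symbol_name		= "do_fork",
 *		.maxactive		= 20,	(tune for expected concurrency)
 *	};
 *
 *	register_kretprobe(&my_kretprobe);
 */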

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable any kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed, but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
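
/*
 * Sketch (illustrative; kp is a hypothetical, already-registered probe):
 * a probe can be toggled cheaply without a full unregister/register cycle:
 *
 *	disable_kprobe(&kp);	(breakpoint removed, kp stays registered)
 *	...
 *	enable_kprobe(&kp);	(breakpoint re-armed)
 */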

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
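
/*
 * From userspace (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/kprobes/list		(dump registered probes)
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all probes)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 */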

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);