kprobes.c revision de5bd88d5a5cce3cacea904d3503e5ebdb3852a2
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures, such as 64-bit powerpc, have function
 * descriptors, so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

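/*
 * Usage sketch (illustrative only): the macro writes the resolved
 * address into 'addr'; "do_fork" here is just an example symbol.  An
 * architecture with function descriptors would provide its own
 * kprobe_lookup_name in <asm/kprobes.h> that dereferences the
 * descriptor before yielding the code address.
 *
 *	kprobe_opcode_t *addr;
 *	kprobe_lookup_name("do_fork", addr);
 *	if (addr)
 *		... addr is do_fork's first instruction ...
 */
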
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}
/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;
	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}

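/*
 * Usage sketch (illustrative, simplified): architecture code typically
 * pairs these helpers inside arch_prepare_kprobe()/arch_remove_kprobe().
 * The exact copy and fixup steps are arch-specific; this shows only the
 * general shape, not any particular port's implementation.
 *
 *	int arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 *
 *	void arch_remove_kprobe(struct kprobe *p)
 *	{
 *		if (p->ainsn.insn)
 *			free_insn_slot(p->ainsn.insn, 0);
 *	}
 */
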
/* Return 1 if the slot's page became unused and was reclaimed, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;
	int safety;

	/* Ensure no one is preempted while on a garbage slot */
	mutex_unlock(&kprobe_insn_mutex);
	safety = check_safety();
	mutex_lock(&kprobe_insn_mutex);
	if (safety != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	mutex_lock(&kprobe_insn_mutex);
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove the rp inst from the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() chains onto it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arch_arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has already gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new one. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe whose module vaddr area has already
		 * been freed. The old instruction slot has therefore been
		 * released, and we need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe.
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot
		 * again, and set the disabled flag because the probe is
		 * not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe, and return 1 on success. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

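/*
 * Illustrative sketch: a probe point may be given either as a bare
 * address or as a symbol_name plus offset, but never both ("do_fork"
 * below is just an example symbol):
 *
 *	struct kprobe kp1 = { .addr = addr };
 *	struct kprobe kp2 = { .symbol_name = "do_fork", .offset = 0 };
 *
 * kprobe_addr() resolves both forms to the final probed address.
 */
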
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!__kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has already freed .init.text, we can't
		 * insert kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

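/*
 * Minimal usage sketch for register_kprobe(), in the spirit of
 * Documentation/kprobes.txt (the handler body and the probed symbol
 * are illustrative, not prescriptive):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	(returning 0 lets the probed insn execute)
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 * A module would call register_kprobe(&kp) from its init function and
 * unregister_kprobe(&kp) from its exit function.
 */
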
/* Check that the given kprobe is valid and return the one in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/*
 * Unregister a kprobe without scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * This is the only probe on the hash list. Disarm it only
		 * if kprobes are enabled and it is not gone - otherwise,
		 * the breakpoint has already been removed and we save on
		 * flushing the icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
			mutex_lock(&text_mutex);
			arch_disarm_kprobe(p);
			mutex_unlock(&text_mutex);
		}
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				arch_disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* TODO: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

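/*
 * Usage sketch for register_jprobe() (illustrative): the jprobe entry
 * must mirror the probed function's signature and must finish with
 * jprobe_return().  Using do_fork as an example (signature as of this
 * kernel generation):
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	(never returns normally)
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= jdo_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 */
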
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

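/*
 * Usage sketch for register_kretprobe() (illustrative): the handler
 * runs when the probed function returns.  The symbol, maxactive value
 * and handler body below are example choices only:
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "%s returned %lu\n", ri->rp->kp.symbol_name,
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,	(tune for expected concurrency)
 *	};
 */
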
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section is freed. We need to
	 * disable the kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arch_disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arch_arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

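/*
 * Illustrative pairing of disable_kprobe()/enable_kprobe(): a client
 * can register a probe in the disabled state and flip it at runtime
 * (hypothetical 'kp' from the register_kprobe() sketch above):
 *
 *	kp.flags |= KPROBE_FLAG_DISABLED;	(start disarmed)
 *	register_kprobe(&kp);
 *	...
 *	enable_kprobe(&kp);	(arm when needed)
 *	disable_kprobe(&kp);	(disarm again without unregistering)
 */
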
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};

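/*
 * The files created below are driven from userspace; a typical session
 * (sketch) looks like:
 *
 *	# cat /sys/kernel/debug/kprobes/list
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all probes)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 */
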
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);
1555