kprobes.c revision a189d0350f387786b1fb5a5d19e3a5ab0bc0cceb
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
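
/*
 * Illustrative sketch (not part of this file): an architecture whose ABI
 * routes calls through function descriptors could override the macro along
 * these lines, resolving the descriptor to the real entry point before it
 * is used as a probe address.  deref_function_descriptor() is a made-up
 * helper name here.
 */
#if 0
#define kprobe_lookup_name(name, addr)					\
do {									\
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);		\
	if (addr)							\
		addr = deref_function_descriptor(addr);			\
} while (0)
#endif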

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Functions in which we want to prohibit probing are normally marked
 * __kprobes.  But there are cases where such functions already belong
 * to a different section (__sched for preempt_schedule, for example).
 *
 * For such cases we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

/* Return 1 if the slot's page became empty and was recycled or freed, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted while on a garbage slot */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif
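
/*
 * Illustrative sketch (not part of this file): how an architecture backend
 * typically consumes this slot allocator, loosely modeled on
 * arch/x86/kernel/kprobes.c.  Details such as instruction-copy fixups and
 * slot "dirtiness" on free vary per architecture.
 */
#if 0
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Grab an executable slot and copy the original insn into it. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(p->ainsn.insn, p->addr,
	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	return 0;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);	/* 0: slot may be reused at once */
	mutex_unlock(&kprobe_mutex);
}
#endif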

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 * 	- under the kprobe_mutex - during kprobe_[un]register()
 * 				OR
 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove the rp instance from the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be ready before recycle_rp_inst() parks instances on it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!__kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod;
		calling_mod = __module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self-probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod))) {
				preempt_enable();
				return -EINVAL;
			}
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return -EINVAL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		return -EINVAL;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p;

	if (p->mod_refcounted) {
		/*
		 * Since we've already incremented refcount,
		 * we don't need to disable preemption.
		 */
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (list_empty(&p->list) || list_is_singular(&p->list)) {
		if (!list_empty(&p->list)) {
			/* "p" is the last child of an aggr_kprobe */
			old_p = list_entry(p->list.next, struct kprobe, list);
			list_del(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	}
}

static int __register_kprobes(struct kprobe **kps, int num,
	unsigned long called_from)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kprobe(kps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}

/*
 * Registration and unregistration functions for kprobe.
 */
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobes(&p, 1,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	return __register_kprobes(kps, num,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
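
/*
 * Minimal usage sketch (in the spirit of samples/kprobes/kprobe_example.c,
 * not part of this file): place a probe on do_fork by symbol name and count
 * the hits.  Handlers run with preemption disabled and must not sleep.
 */
#if 0
static int fork_count;

static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
{
	fork_count++;	/* racy across CPUs, but fine for a sketch */
	return 0;	/* 0: continue with single-step and post_handler */
}

static struct kprobe my_kp = {
	.symbol_name	= "do_fork",	/* resolved by kprobe_addr() */
	.pre_handler	= my_pre_handler,
};

static int __init my_probe_init(void)
{
	return register_kprobe(&my_kp);
}

static void __exit my_probe_exit(void)
{
	unregister_kprobe(&my_kp);
}
#endif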

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

static int __register_jprobes(struct jprobe **jps, int num,
	unsigned long called_from)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = __register_kprobe(&jp->kp, called_from);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	return __register_jprobes(&jp, 1,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	return __register_jprobes(jps, num,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
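
/*
 * Jprobe usage sketch (after samples/kprobes/jprobe_example.c, not part of
 * this file): the handler mirrors the probed function's signature (shown
 * here for do_fork as of this kernel era) and must end in jprobe_return().
 */
#if 0
static long my_do_fork(unsigned long clone_flags, unsigned long stack_start,
		       struct pt_regs *regs, unsigned long stack_size,
		       int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* transfers control back; never returns here */
	return 0;
}

static struct jprobe my_jp = {
	.entry	= my_do_fork,
	.kp	= {
		.symbol_name	= "do_fork",
	},
};

/* register_jprobe(&my_jp) to arm, unregister_jprobe(&my_jp) to remove */
#endif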

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe.  When the
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fires */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			/*
			 * The entry handler declined this instance; rp->lock
			 * was dropped above, so retake it and return ri to
			 * the free list.
			 */
			spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

static int __kprobes __register_kretprobe(struct kretprobe *rp,
					  unsigned long called_from)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = __register_kprobe(&rp->kp, called_from);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}

static int __register_kretprobes(struct kretprobe **rps, int num,
	unsigned long called_from)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kretprobe(rps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return __register_kretprobes(&rp, 1,
			(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return __register_kretprobes(rps, num,
			(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
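
/*
 * Kretprobe usage sketch (after samples/kprobes/kretprobe_example.c, not
 * part of this file): report do_fork's return value from the return
 * handler.  regs_return_value() is the arch-independent accessor for the
 * returned register.
 */
#if 0
static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "do_fork returned %ld\n",
	       (long)regs_return_value(regs));
	return 0;
}

static struct kretprobe my_rp = {
	.handler	= my_ret_handler,
	.maxactive	= 20,	/* probe up to 20 concurrent invocations */
	.kp	= {
		.symbol_name	= "do_fork",
	},
};

/* register_kretprobe(&my_rp) to arm, unregister_kretprobe(&my_rp) to remove */
#endif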

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}
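
/*
 * Usage sketch for the files created above (see Documentation/kprobes.txt):
 *
 *	# cat /sys/kernel/debug/kprobes/list
 *	<addr>  k  <symbol>+<offset>  [module]
 *	(type "k": kprobe, "r": kretprobe, "j": jprobe; see report_probe())
 *
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	- disarm every probe
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	- arm them again
 */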

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_kprobes);
EXPORT_SYMBOL_GPL(unregister_kprobes);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(register_jprobes);
EXPORT_SYMBOL_GPL(unregister_jprobes);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
EXPORT_SYMBOL_GPL(register_kretprobes);
EXPORT_SYMBOL_GPL(unregister_kretprobes);