kprobes.c revision b4c6c34a530b4d1c626f4ac0a884e0a9b849378c
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
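/*
 * Illustrative sizing (the actual values are arch-dependent): with 4KB
 * pages and an architecture where MAX_INSN_SIZE * sizeof(kprobe_opcode_t)
 * comes to 16 bytes, this works out to 256 instruction slots per page.
 */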
struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

retry:
	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip) {
		return NULL;
	}

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
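/*
 * slot_used[] values: 0 = free, 1 = in use, -1 = dirty (freed by
 * free_insn_slot() while possibly still executing, and reclaimed by
 * collect_garbage_slots() once check_safety() says it cannot be live).
 */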
/* Return 1 if the page became empty and was freed or recycled, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = 0;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted in the middle of a garbage slot */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
		int i;
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == -1 &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = -1;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}
	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
		collect_garbage_slots();
	}
}
#endif
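/*
 * kprobe_instance records, per CPU, the kprobe whose handler is currently
 * executing, so that aggr_fault_handler() and aggr_break_handler() can
 * route a fault or breakpoint taken inside a handler to the right probe.
 */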
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
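/*
 * A kretprobe keeps a pool of kretprobe_instance objects: free_instances
 * holds the idle ones; when the probed function is entered an instance
 * moves to used_instances and is hashed by task in kretprobe_inst_table
 * so the return handler can find it when the function returns.
 */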
/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These leftover instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}
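/*
 * __register_kprobe() does the real work of registration: resolve
 * symbol_name (if given) and apply the offset, reject addresses outside
 * kernel text or inside the __kprobes text section (code that must not
 * be probed), take a reference on a probed module (unless the module is
 * probing itself), then either chain the probe onto an existing one at
 * the same address or insert it into the hash table and arm it.
 */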
static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument look it up,
	 * and add it to the address.  That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if ((!kernel_text_address((unsigned long) p->addr)) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check whether we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
	    (ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}
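/*
 * Example usage (an illustrative sketch, not part of this file): a
 * minimal module would place a probe like this.  The names handler_pre
 * and my_probe are invented for the example; a pre_handler that returns
 * 0 lets the probed instruction execute normally.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("probe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_probe = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 * register_kprobe(&my_probe) arms the probe; unregister_kprobe(&my_probe)
 * removes it again.
 */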
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
	    (p->list.next == &old_p->list) &&
	    (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier()
	 * if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};


int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
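/*
 * Example usage (an illustrative sketch, not part of this file): a return
 * probe that fires when the probed function returns.  The names
 * ret_handler and my_kretprobe are invented for the example; maxactive
 * bounds how many instances may be outstanding at once.
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk("%p returned to %p\n", ri->rp->kp.addr, ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,
 *	};
 *
 *	my_kretprobe.kp.symbol_name = "do_fork";
 *	register_kretprobe(&my_kretprobe);
 */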
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);