kprobes.c revision e579abeb58eb4b8d7321c6eb44dd9e2d0cbaebaa
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in, are marked
 * __kprobes.  But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped.  x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
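
/*
 * Illustrative arithmetic (an editorial sketch, not from this file):
 * on x86, where MAX_INSN_SIZE is 16 and kprobe_opcode_t is one byte,
 * a 4096-byte page yields 4096 / (16 * 1) = 256 instruction slots per
 * page.  The exact figures are architecture-dependent.
 */
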
struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside.  This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}

/* Return 1 if all garbage slots on this page are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;
	int safety;

	/* Ensure no one is preempted on the garbage slots */
	mutex_unlock(&kprobe_insn_mutex);
	safety = check_safety();
	mutex_lock(&kprobe_insn_mutex);
	if (safety != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	mutex_lock(&kprobe_insn_mutex);
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif
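
/*
 * Usage sketch (an editorial assumption, not part of this file):
 * architectures that define __ARCH_WANT_KPROBES_INSN_SLOT typically
 * pair these helpers in their arch_prepare_kprobe() and
 * arch_remove_kprobe() implementations, roughly:
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		return 0;
 *	}
 *
 *	void __kprobes arch_remove_kprobe(struct kprobe *p)
 *	{
 *		if (p->ainsn.insn)
 *			free_insn_slot(p->ainsn.insn, 0);
 *	}
 *
 * Real implementations also fix up the copied instruction; see
 * arch/<arch>/kernel/kprobes.c for the authoritative versions.
 */
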
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && !kprobe_gone(kp)) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && !kprobe_gone(kp)) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
				   struct hlist_head **head,
				   unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
					   unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
				     unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
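
/*
 * Usage sketch (an assumption about typical callers, not code from this
 * file): an arch return-probe trampoline handler walks the per-task
 * bucket under the hash lock, roughly:
 *
 *	struct kretprobe_instance *ri;
 *	struct hlist_head *head;
 *	struct hlist_node *node, *tmp;
 *	unsigned long flags;
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 *		if (ri->task != current)
 *			continue;
 *		... handle and recycle this instance ...
 *	}
 *	kretprobe_hash_unlock(current, &flags);
 *
 * kprobe_flush_task() below follows the same pattern via the
 * hash-keyed variants.
 */
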
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task.  These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* The list must be initialized before recycle_rp_inst() fills it. */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list.  Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe".  Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about kprobes which have already gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location that had a probe in a module vaddr area which
		 * was already freed, so the instruction slot has already
		 * been released.  We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe.  It will be used
			 * next time, or freed by unregister_kprobe().
			 */
			return ret;
		/* Clear the gone flag to prevent allocating a new slot again. */
		ap->flags &= ~KPROBE_FLAG_GONE;
		/*
		 * If old_p has gone, its breakpoint has been disarmed.
		 * We have to arm it again after preparing real kprobes.
		 */
		if (!kprobes_all_disarmed)
			arch_arm_kprobe(ap);
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it.  This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!__kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	p->flags = 0;
	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has freed its .init.text, we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed)
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
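
/*
 * Usage sketch (an assumption mirroring Documentation/kprobes.txt, not
 * anything defined in this file).  A client fills in a struct kprobe
 * and registers it; the handler name and probed symbol below are
 * illustrative only.  Returning 0 from the pre-handler lets the probed
 * instruction execute normally:
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_fork",
 *		.pre_handler = my_pre_handler,
 *	};
 *
 * Registration is then register_kprobe(&my_kp), and teardown is
 * unregister_kprobe(&my_kp).  Because kprobe_addr() above also honours
 * p->offset, a probe may be placed at symbol + offset rather than at
 * the symbol itself.
 */
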
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return -EINVAL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		return -EINVAL;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list.  Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed.  We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_gone(old_p)) {
			mutex_lock(&text_mutex);
			arch_disarm_kprobe(p);
			mutex_unlock(&text_mutex);
		}
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
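
/*
 * The *_kprobes() batch variants register several probes at once and
 * roll back on failure, which is why register_kprobes() above
 * unregisters the first i probes when the (i+1)-th registration fails.
 * A sketch (the probe array is hypothetical):
 *
 *	static struct kprobe *my_probes[] = { &kp1, &kp2, &kp3 };
 *
 *	ret = register_kprobes(my_probes, ARRAY_SIZE(my_probes));
 *	...
 *	unregister_kprobes(my_probes, ARRAY_SIZE(my_probes));
 */
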
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
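
/*
 * Usage sketch (an assumption; names are illustrative): a jprobe
 * handler must share the prototype of the probed function and must end
 * with jprobe_return().  To mirror a hypothetical
 * long probed_fn(long a, long b):
 *
 *	static long my_jp_handler(long a, long b)
 *	{
 *		printk(KERN_INFO "args: %ld %ld\n", a, b);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = my_jp_handler,
 *		.kp.symbol_name = "probed_fn",
 *	};
 *
 * register_jprobe(&my_jp) and unregister_jprobe(&my_jp) then bracket
 * its lifetime.
 */
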
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe.  When
 * the probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fires */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
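
/*
 * Usage sketch (an assumption; names are illustrative): a return probe
 * fires when the probed function returns, and maxactive bounds how many
 * instances may be live concurrently (see the preallocation above):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probed function returned\n");
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.kp.symbol_name = "do_fork",
 *		.maxactive = 20,
 *	};
 *
 * register_kretprobe(&my_rp) arms it, unregister_kretprobe(&my_rp)
 * tears it down, and my_rp.nmissed reports how many hits were dropped
 * for want of a free instance.
 */
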
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed.  When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed.  We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr at which this probe is installed
				 * will soon be vfreed, but not synced to
				 * disk.  Hence, disarming the breakpoint
				 * isn't needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""));
	else
		seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""));
}
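
/*
 * The resulting lines in the debugfs "list" file look roughly like
 * (addresses are hypothetical):
 *
 *	c0123456 k vfs_read+0x0
 *	c0abcdef r do_fork+0x0   [GONE]
 *
 * where the second column is k, r or j for a kprobe, kretprobe or
 * jprobe respectively, followed by the module name (if any) and the
 * [GONE] marker for killed probes.
 */
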
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_gone(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched.  We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
};
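
/*
 * Example interaction (assuming debugfs is mounted at the conventional
 * /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	disarms every kprobe
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	arms them again
 *
 * Reading the file returns "0\n" or "1\n" accordingly.
 */
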
We can reuse that facility when 1360 * available 1361 */ 1362static ssize_t read_enabled_file_bool(struct file *file, 1363 char __user *user_buf, size_t count, loff_t *ppos) 1364{ 1365 char buf[3]; 1366 1367 if (!kprobes_all_disarmed) 1368 buf[0] = '1'; 1369 else 1370 buf[0] = '0'; 1371 buf[1] = '\n'; 1372 buf[2] = 0x00; 1373 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 1374} 1375 1376static ssize_t write_enabled_file_bool(struct file *file, 1377 const char __user *user_buf, size_t count, loff_t *ppos) 1378{ 1379 char buf[32]; 1380 int buf_size; 1381 1382 buf_size = min(count, (sizeof(buf)-1)); 1383 if (copy_from_user(buf, user_buf, buf_size)) 1384 return -EFAULT; 1385 1386 switch (buf[0]) { 1387 case 'y': 1388 case 'Y': 1389 case '1': 1390 arm_all_kprobes(); 1391 break; 1392 case 'n': 1393 case 'N': 1394 case '0': 1395 disarm_all_kprobes(); 1396 break; 1397 } 1398 1399 return count; 1400} 1401 1402static struct file_operations fops_kp = { 1403 .read = read_enabled_file_bool, 1404 .write = write_enabled_file_bool, 1405}; 1406 1407static int __kprobes debugfs_kprobe_init(void) 1408{ 1409 struct dentry *dir, *file; 1410 unsigned int value = 1; 1411 1412 dir = debugfs_create_dir("kprobes", NULL); 1413 if (!dir) 1414 return -ENOMEM; 1415 1416 file = debugfs_create_file("list", 0444, dir, NULL, 1417 &debugfs_kprobes_operations); 1418 if (!file) { 1419 debugfs_remove(dir); 1420 return -ENOMEM; 1421 } 1422 1423 file = debugfs_create_file("enabled", 0600, dir, 1424 &value, &fops_kp); 1425 if (!file) { 1426 debugfs_remove(dir); 1427 return -ENOMEM; 1428 } 1429 1430 return 0; 1431} 1432 1433late_initcall(debugfs_kprobe_init); 1434#endif /* CONFIG_DEBUG_FS */ 1435 1436module_init(init_kprobes); 1437 1438/* defined in arch/.../kernel/kprobes.c */ 1439EXPORT_SYMBOL_GPL(jprobe_return); 1440