kprobes.c revision 4460fdad85becd569f11501ad5b91814814335ff
1/* 2 * Kernel Probes (KProbes) 3 * kernel/kprobes.c 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * 19 * Copyright (C) IBM Corporation, 2002, 2004 20 * 21 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel 22 * Probes initial implementation (includes suggestions from 23 * Rusty Russell). 24 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with 25 * hlists and exceptions notifier as suggested by Andi Kleen. 26 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes 27 * interface to access function arguments. 28 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes 29 * exceptions notifier to be first on the priority list. 30 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston 31 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 32 * <prasanna@in.ibm.com> added function-return probes. 33 */ 34#include <linux/kprobes.h> 35#include <linux/hash.h> 36#include <linux/init.h> 37#include <linux/slab.h> 38#include <linux/stddef.h> 39#include <linux/module.h> 40#include <linux/moduleloader.h> 41#include <linux/kallsyms.h> 42#include <linux/freezer.h> 43#include <linux/seq_file.h> 44#include <linux/debugfs.h> 45#include <linux/kdebug.h> 46#include <linux/memory.h> 47 48#include <asm-generic/sections.h> 49#include <asm/cacheflush.h> 50#include <asm/errno.h> 51#include <asm/uaccess.h> 52 53#define KPROBE_HASH_BITS 6 54#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) 55 56 57/* 58 * Some oddball architectures like 64bit powerpc have function descriptors 59 * so this must be overridable. 60 */ 61#ifndef kprobe_lookup_name 62#define kprobe_lookup_name(name, addr) \ 63 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) 64#endif 65 66static int kprobes_initialized; 67static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 68static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; 69 70/* NOTE: change this value only with kprobe_mutex held */ 71static bool kprobe_enabled; 72 73static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ 74static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 75static struct { 76 spinlock_t lock ____cacheline_aligned_in_smp; 77} kretprobe_table_locks[KPROBE_TABLE_SIZE]; 78 79static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) 80{ 81 return &(kretprobe_table_locks[hash].lock); 82} 83 84/* 85 * Normally, functions that we'd want to prohibit kprobes in, are marked 86 * __kprobes. But, there are cases where such functions already belong to 87 * a different section (__sched for preempt_schedule) 88 * 89 * For such cases, we now have a blacklist 90 */ 91static struct kprobe_blackpoint kprobe_blacklist[] = { 92 {"preempt_schedule",}, 93 {NULL} /* Terminator */ 94}; 95 96#ifdef __ARCH_WANT_KPROBES_INSN_SLOT 97/* 98 * kprobe->ainsn.insn points to the copy of the instruction to be 99 * single-stepped. x86_64, POWER4 and above have no-exec support and 100 * stepping on the instruction on a vmalloced/kmalloced/data page 101 * is a recipe for disaster 102 */ 103#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) 104 105struct kprobe_insn_page { 106 struct hlist_node hlist; 107 kprobe_opcode_t *insns; /* Page of instruction slots */ 108 char slot_used[INSNS_PER_PAGE]; 109 int nused; 110 int ngarbage; 111}; 112 113enum kprobe_slot_state { 114 SLOT_CLEAN = 0, 115 SLOT_DIRTY = 1, 116 SLOT_USED = 2, 117}; 118 119static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ 120static struct hlist_head kprobe_insn_pages; 121static int kprobe_garbage_slots; 122static int collect_garbage_slots(void); 123 124static int __kprobes check_safety(void) 125{ 126 int ret = 0; 127#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER) 128 ret = freeze_processes(); 129 if (ret == 0) { 130 struct task_struct *p, *q; 131 do_each_thread(p, q) { 132 if (p != current && p->state == TASK_RUNNING && 133 p->pid != 0) { 134 printk("Check failed: %s is running\n",p->comm); 135 ret = -1; 136 goto loop_end; 137 } 138 } while_each_thread(p, q); 139 } 140loop_end: 141 thaw_processes(); 142#else 143 synchronize_sched(); 144#endif 145 return ret; 146} 147 148/** 149 * __get_insn_slot() - Find a slot on an executable page for an instruction. 150 * We allocate an executable page if there's no room on existing ones. 151 */ 152static kprobe_opcode_t __kprobes *__get_insn_slot(void) 153{ 154 struct kprobe_insn_page *kip; 155 struct hlist_node *pos; 156 157 retry: 158 hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { 159 if (kip->nused < INSNS_PER_PAGE) { 160 int i; 161 for (i = 0; i < INSNS_PER_PAGE; i++) { 162 if (kip->slot_used[i] == SLOT_CLEAN) { 163 kip->slot_used[i] = SLOT_USED; 164 kip->nused++; 165 return kip->insns + (i * MAX_INSN_SIZE); 166 } 167 } 168 /* Surprise! No unused slots. Fix kip->nused. */ 169 kip->nused = INSNS_PER_PAGE; 170 } 171 } 172 173 /* If there are any garbage slots, collect it and try again. */ 174 if (kprobe_garbage_slots && collect_garbage_slots() == 0) { 175 goto retry; 176 } 177 /* All out of space. Need to allocate a new page. Use slot 0. */ 178 kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL); 179 if (!kip) 180 return NULL; 181 182 /* 183 * Use module_alloc so this page is within +/- 2GB of where the 184 * kernel image and loaded module images reside. This is required 185 * so x86_64 can correctly handle the %rip-relative fixups. 186 */ 187 kip->insns = module_alloc(PAGE_SIZE); 188 if (!kip->insns) { 189 kfree(kip); 190 return NULL; 191 } 192 INIT_HLIST_NODE(&kip->hlist); 193 hlist_add_head(&kip->hlist, &kprobe_insn_pages); 194 memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE); 195 kip->slot_used[0] = SLOT_USED; 196 kip->nused = 1; 197 kip->ngarbage = 0; 198 return kip->insns; 199} 200 201kprobe_opcode_t __kprobes *get_insn_slot(void) 202{ 203 kprobe_opcode_t *ret; 204 mutex_lock(&kprobe_insn_mutex); 205 ret = __get_insn_slot(); 206 mutex_unlock(&kprobe_insn_mutex); 207 return ret; 208} 209 210/* Return 1 if all garbages are collected, otherwise 0. */ 211static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) 212{ 213 kip->slot_used[idx] = SLOT_CLEAN; 214 kip->nused--; 215 if (kip->nused == 0) { 216 /* 217 * Page is no longer in use. Free it unless 218 * it's the last one. We keep the last one 219 * so as not to have to set it up again the 220 * next time somebody inserts a probe. 221 */ 222 hlist_del(&kip->hlist); 223 if (hlist_empty(&kprobe_insn_pages)) { 224 INIT_HLIST_NODE(&kip->hlist); 225 hlist_add_head(&kip->hlist, 226 &kprobe_insn_pages); 227 } else { 228 module_free(NULL, kip->insns); 229 kfree(kip); 230 } 231 return 1; 232 } 233 return 0; 234} 235 236static int __kprobes collect_garbage_slots(void) 237{ 238 struct kprobe_insn_page *kip; 239 struct hlist_node *pos, *next; 240 int safety; 241 242 /* Ensure no-one is preepmted on the garbages */ 243 mutex_unlock(&kprobe_insn_mutex); 244 safety = check_safety(); 245 mutex_lock(&kprobe_insn_mutex); 246 if (safety != 0) 247 return -EAGAIN; 248 249 hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { 250 int i; 251 if (kip->ngarbage == 0) 252 continue; 253 kip->ngarbage = 0; /* we will collect all garbages */ 254 for (i = 0; i < INSNS_PER_PAGE; i++) { 255 if (kip->slot_used[i] == SLOT_DIRTY && 256 collect_one_slot(kip, i)) 257 break; 258 } 259 } 260 kprobe_garbage_slots = 0; 261 return 0; 262} 263 264void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) 265{ 266 struct kprobe_insn_page *kip; 267 struct hlist_node *pos; 268 269 mutex_lock(&kprobe_insn_mutex); 270 hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { 271 if (kip->insns <= slot && 272 slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { 273 int i = (slot - kip->insns) / MAX_INSN_SIZE; 274 if (dirty) { 275 kip->slot_used[i] = SLOT_DIRTY; 276 kip->ngarbage++; 277 } else { 278 collect_one_slot(kip, i); 279 } 280 break; 281 } 282 } 283 284 if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE) 285 collect_garbage_slots(); 286 287 mutex_unlock(&kprobe_insn_mutex); 288} 289#endif 290 291/* We have preemption disabled.. so it is safe to use __ versions */ 292static inline void set_kprobe_instance(struct kprobe *kp) 293{ 294 __get_cpu_var(kprobe_instance) = kp; 295} 296 297static inline void reset_kprobe_instance(void) 298{ 299 __get_cpu_var(kprobe_instance) = NULL; 300} 301 302/* 303 * This routine is called either: 304 * - under the kprobe_mutex - during kprobe_[un]register() 305 * OR 306 * - with preemption disabled - from arch/xxx/kernel/kprobes.c 307 */ 308struct kprobe __kprobes *get_kprobe(void *addr) 309{ 310 struct hlist_head *head; 311 struct hlist_node *node; 312 struct kprobe *p; 313 314 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 315 hlist_for_each_entry_rcu(p, node, head, hlist) { 316 if (p->addr == addr) 317 return p; 318 } 319 return NULL; 320} 321 322/* 323 * Aggregate handlers for multiple kprobes support - these handlers 324 * take care of invoking the individual kprobe handlers on p->list 325 */ 326static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 327{ 328 struct kprobe *kp; 329 330 list_for_each_entry_rcu(kp, &p->list, list) { 331 if (kp->pre_handler && !kprobe_gone(kp)) { 332 set_kprobe_instance(kp); 333 if (kp->pre_handler(kp, regs)) 334 return 1; 335 } 336 reset_kprobe_instance(); 337 } 338 return 0; 339} 340 341static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 342 unsigned long flags) 343{ 344 struct kprobe *kp; 345 346 list_for_each_entry_rcu(kp, &p->list, list) { 347 if (kp->post_handler && !kprobe_gone(kp)) { 348 set_kprobe_instance(kp); 349 kp->post_handler(kp, regs, flags); 350 reset_kprobe_instance(); 351 } 352 } 353} 354 355static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, 356 int trapnr) 357{ 358 struct kprobe *cur = __get_cpu_var(kprobe_instance); 359 360 /* 361 * if we faulted "during" the execution of a user specified 362 * probe handler, invoke just that probe's fault handler 363 */ 364 if (cur && cur->fault_handler) { 365 if (cur->fault_handler(cur, regs, trapnr)) 366 return 1; 367 } 368 return 0; 369} 370 371static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs) 372{ 373 struct kprobe *cur = __get_cpu_var(kprobe_instance); 374 int ret = 0; 375 376 if (cur && cur->break_handler) { 377 if (cur->break_handler(cur, regs)) 378 ret = 1; 379 } 380 reset_kprobe_instance(); 381 return ret; 382} 383 384/* Walks the list and increments nmissed count for multiprobe case */ 385void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) 386{ 387 struct kprobe *kp; 388 if (p->pre_handler != aggr_pre_handler) { 389 p->nmissed++; 390 } else { 391 list_for_each_entry_rcu(kp, &p->list, list) 392 kp->nmissed++; 393 } 394 return; 395} 396 397void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, 398 struct hlist_head *head) 399{ 400 struct kretprobe *rp = ri->rp; 401 402 /* remove rp inst off the rprobe_inst_table */ 403 hlist_del(&ri->hlist); 404 INIT_HLIST_NODE(&ri->hlist); 405 if (likely(rp)) { 406 spin_lock(&rp->lock); 407 hlist_add_head(&ri->hlist, &rp->free_instances); 408 spin_unlock(&rp->lock); 409 } else 410 /* Unregistering */ 411 hlist_add_head(&ri->hlist, head); 412} 413 414void __kprobes kretprobe_hash_lock(struct task_struct *tsk, 415 struct hlist_head **head, unsigned long *flags) 416{ 417 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 418 spinlock_t *hlist_lock; 419 420 *head = &kretprobe_inst_table[hash]; 421 hlist_lock = kretprobe_table_lock_ptr(hash); 422 spin_lock_irqsave(hlist_lock, *flags); 423} 424 425static void __kprobes kretprobe_table_lock(unsigned long hash, 426 unsigned long *flags) 427{ 428 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 429 spin_lock_irqsave(hlist_lock, *flags); 430} 431 432void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, 433 unsigned long *flags) 434{ 435 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 436 spinlock_t *hlist_lock; 437 438 hlist_lock = kretprobe_table_lock_ptr(hash); 439 spin_unlock_irqrestore(hlist_lock, *flags); 440} 441 442void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags) 443{ 444 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 445 spin_unlock_irqrestore(hlist_lock, *flags); 446} 447 448/* 449 * This function is called from finish_task_switch when task tk becomes dead, 450 * so that we can recycle any function-return probe instances associated 451 * with this task. These left over instances represent probed functions 452 * that have been called but will never return. 453 */ 454void __kprobes kprobe_flush_task(struct task_struct *tk) 455{ 456 struct kretprobe_instance *ri; 457 struct hlist_head *head, empty_rp; 458 struct hlist_node *node, *tmp; 459 unsigned long hash, flags = 0; 460 461 if (unlikely(!kprobes_initialized)) 462 /* Early boot. kretprobe_table_locks not yet initialized. */ 463 return; 464 465 hash = hash_ptr(tk, KPROBE_HASH_BITS); 466 head = &kretprobe_inst_table[hash]; 467 kretprobe_table_lock(hash, &flags); 468 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 469 if (ri->task == tk) 470 recycle_rp_inst(ri, &empty_rp); 471 } 472 kretprobe_table_unlock(hash, &flags); 473 INIT_HLIST_HEAD(&empty_rp); 474 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 475 hlist_del(&ri->hlist); 476 kfree(ri); 477 } 478} 479 480static inline void free_rp_inst(struct kretprobe *rp) 481{ 482 struct kretprobe_instance *ri; 483 struct hlist_node *pos, *next; 484 485 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) { 486 hlist_del(&ri->hlist); 487 kfree(ri); 488 } 489} 490 491static void __kprobes cleanup_rp_inst(struct kretprobe *rp) 492{ 493 unsigned long flags, hash; 494 struct kretprobe_instance *ri; 495 struct hlist_node *pos, *next; 496 struct hlist_head *head; 497 498 /* No race here */ 499 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { 500 kretprobe_table_lock(hash, &flags); 501 head = &kretprobe_inst_table[hash]; 502 hlist_for_each_entry_safe(ri, pos, next, head, hlist) { 503 if (ri->rp == rp) 504 ri->rp = NULL; 505 } 506 kretprobe_table_unlock(hash, &flags); 507 } 508 free_rp_inst(rp); 509} 510 511/* 512 * Keep all fields in the kprobe consistent 513 */ 514static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) 515{ 516 memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t)); 517 memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn)); 518} 519 520/* 521* Add the new probe to old_p->list. Fail if this is the 522* second jprobe at the address - two jprobes can't coexist 523*/ 524static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p) 525{ 526 if (p->break_handler) { 527 if (old_p->break_handler) 528 return -EEXIST; 529 list_add_tail_rcu(&p->list, &old_p->list); 530 old_p->break_handler = aggr_break_handler; 531 } else 532 list_add_rcu(&p->list, &old_p->list); 533 if (p->post_handler && !old_p->post_handler) 534 old_p->post_handler = aggr_post_handler; 535 return 0; 536} 537 538/* 539 * Fill in the required fields of the "manager kprobe". Replace the 540 * earlier kprobe in the hlist with the manager kprobe 541 */ 542static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 543{ 544 copy_kprobe(p, ap); 545 flush_insn_slot(ap); 546 ap->addr = p->addr; 547 ap->pre_handler = aggr_pre_handler; 548 ap->fault_handler = aggr_fault_handler; 549 /* We don't care the kprobe which has gone. */ 550 if (p->post_handler && !kprobe_gone(p)) 551 ap->post_handler = aggr_post_handler; 552 if (p->break_handler && !kprobe_gone(p)) 553 ap->break_handler = aggr_break_handler; 554 555 INIT_LIST_HEAD(&ap->list); 556 list_add_rcu(&p->list, &ap->list); 557 558 hlist_replace_rcu(&p->hlist, &ap->hlist); 559} 560 561/* 562 * This is the second or subsequent kprobe at the address - handle 563 * the intricacies 564 */ 565static int __kprobes register_aggr_kprobe(struct kprobe *old_p, 566 struct kprobe *p) 567{ 568 int ret = 0; 569 struct kprobe *ap; 570 571 if (kprobe_gone(old_p)) { 572 /* 573 * Attempting to insert new probe at the same location that 574 * had a probe in the module vaddr area which already 575 * freed. So, the instruction slot has already been 576 * released. We need a new slot for the new probe. 577 */ 578 ret = arch_prepare_kprobe(old_p); 579 if (ret) 580 return ret; 581 } 582 if (old_p->pre_handler == aggr_pre_handler) { 583 copy_kprobe(old_p, p); 584 ret = add_new_kprobe(old_p, p); 585 ap = old_p; 586 } else { 587 ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL); 588 if (!ap) { 589 if (kprobe_gone(old_p)) 590 arch_remove_kprobe(old_p); 591 return -ENOMEM; 592 } 593 add_aggr_kprobe(ap, old_p); 594 copy_kprobe(ap, p); 595 ret = add_new_kprobe(ap, p); 596 } 597 if (kprobe_gone(old_p)) { 598 /* 599 * If the old_p has gone, its breakpoint has been disarmed. 600 * We have to arm it again after preparing real kprobes. 601 */ 602 ap->flags &= ~KPROBE_FLAG_GONE; 603 if (kprobe_enabled) 604 arch_arm_kprobe(ap); 605 } 606 return ret; 607} 608 609static int __kprobes in_kprobes_functions(unsigned long addr) 610{ 611 struct kprobe_blackpoint *kb; 612 613 if (addr >= (unsigned long)__kprobes_text_start && 614 addr < (unsigned long)__kprobes_text_end) 615 return -EINVAL; 616 /* 617 * If there exists a kprobe_blacklist, verify and 618 * fail any probe registration in the prohibited area 619 */ 620 for (kb = kprobe_blacklist; kb->name != NULL; kb++) { 621 if (kb->start_addr) { 622 if (addr >= kb->start_addr && 623 addr < (kb->start_addr + kb->range)) 624 return -EINVAL; 625 } 626 } 627 return 0; 628} 629 630/* 631 * If we have a symbol_name argument, look it up and add the offset field 632 * to it. This way, we can specify a relative address to a symbol. 633 */ 634static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) 635{ 636 kprobe_opcode_t *addr = p->addr; 637 if (p->symbol_name) { 638 if (addr) 639 return NULL; 640 kprobe_lookup_name(p->symbol_name, addr); 641 } 642 643 if (!addr) 644 return NULL; 645 return (kprobe_opcode_t *)(((char *)addr) + p->offset); 646} 647 648int __kprobes register_kprobe(struct kprobe *p) 649{ 650 int ret = 0; 651 struct kprobe *old_p; 652 struct module *probed_mod; 653 kprobe_opcode_t *addr; 654 655 addr = kprobe_addr(p); 656 if (!addr) 657 return -EINVAL; 658 p->addr = addr; 659 660 preempt_disable(); 661 if (!__kernel_text_address((unsigned long) p->addr) || 662 in_kprobes_functions((unsigned long) p->addr)) { 663 preempt_enable(); 664 return -EINVAL; 665 } 666 667 p->flags = 0; 668 /* 669 * Check if are we probing a module. 670 */ 671 probed_mod = __module_text_address((unsigned long) p->addr); 672 if (probed_mod) { 673 /* 674 * We must hold a refcount of the probed module while updating 675 * its code to prohibit unexpected unloading. 676 */ 677 if (unlikely(!try_module_get(probed_mod))) { 678 preempt_enable(); 679 return -EINVAL; 680 } 681 /* 682 * If the module freed .init.text, we couldn't insert 683 * kprobes in there. 684 */ 685 if (within_module_init((unsigned long)p->addr, probed_mod) && 686 probed_mod->state != MODULE_STATE_COMING) { 687 module_put(probed_mod); 688 preempt_enable(); 689 return -EINVAL; 690 } 691 } 692 preempt_enable(); 693 694 p->nmissed = 0; 695 INIT_LIST_HEAD(&p->list); 696 mutex_lock(&kprobe_mutex); 697 old_p = get_kprobe(p->addr); 698 if (old_p) { 699 ret = register_aggr_kprobe(old_p, p); 700 goto out; 701 } 702 703 mutex_lock(&text_mutex); 704 ret = arch_prepare_kprobe(p); 705 if (ret) 706 goto out_unlock_text; 707 708 INIT_HLIST_NODE(&p->hlist); 709 hlist_add_head_rcu(&p->hlist, 710 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 711 712 if (kprobe_enabled) 713 arch_arm_kprobe(p); 714 715out_unlock_text: 716 mutex_unlock(&text_mutex); 717out: 718 mutex_unlock(&kprobe_mutex); 719 720 if (probed_mod) 721 module_put(probed_mod); 722 723 return ret; 724} 725 726/* 727 * Unregister a kprobe without a scheduler synchronization. 728 */ 729static int __kprobes __unregister_kprobe_top(struct kprobe *p) 730{ 731 struct kprobe *old_p, *list_p; 732 733 old_p = get_kprobe(p->addr); 734 if (unlikely(!old_p)) 735 return -EINVAL; 736 737 if (p != old_p) { 738 list_for_each_entry_rcu(list_p, &old_p->list, list) 739 if (list_p == p) 740 /* kprobe p is a valid probe */ 741 goto valid_p; 742 return -EINVAL; 743 } 744valid_p: 745 if (old_p == p || 746 (old_p->pre_handler == aggr_pre_handler && 747 list_is_singular(&old_p->list))) { 748 /* 749 * Only probe on the hash list. Disarm only if kprobes are 750 * enabled and not gone - otherwise, the breakpoint would 751 * already have been removed. We save on flushing icache. 752 */ 753 if (kprobe_enabled && !kprobe_gone(old_p)) { 754 mutex_lock(&text_mutex); 755 arch_disarm_kprobe(p); 756 mutex_unlock(&text_mutex); 757 } 758 hlist_del_rcu(&old_p->hlist); 759 } else { 760 if (p->break_handler && !kprobe_gone(p)) 761 old_p->break_handler = NULL; 762 if (p->post_handler && !kprobe_gone(p)) { 763 list_for_each_entry_rcu(list_p, &old_p->list, list) { 764 if ((list_p != p) && (list_p->post_handler)) 765 goto noclean; 766 } 767 old_p->post_handler = NULL; 768 } 769noclean: 770 list_del_rcu(&p->list); 771 } 772 return 0; 773} 774 775static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) 776{ 777 struct kprobe *old_p; 778 779 if (list_empty(&p->list)) 780 arch_remove_kprobe(p); 781 else if (list_is_singular(&p->list)) { 782 /* "p" is the last child of an aggr_kprobe */ 783 old_p = list_entry(p->list.next, struct kprobe, list); 784 list_del(&p->list); 785 arch_remove_kprobe(old_p); 786 kfree(old_p); 787 } 788} 789 790int __kprobes register_kprobes(struct kprobe **kps, int num) 791{ 792 int i, ret = 0; 793 794 if (num <= 0) 795 return -EINVAL; 796 for (i = 0; i < num; i++) { 797 ret = register_kprobe(kps[i]); 798 if (ret < 0) { 799 if (i > 0) 800 unregister_kprobes(kps, i); 801 break; 802 } 803 } 804 return ret; 805} 806 807void __kprobes unregister_kprobe(struct kprobe *p) 808{ 809 unregister_kprobes(&p, 1); 810} 811 812void __kprobes unregister_kprobes(struct kprobe **kps, int num) 813{ 814 int i; 815 816 if (num <= 0) 817 return; 818 mutex_lock(&kprobe_mutex); 819 for (i = 0; i < num; i++) 820 if (__unregister_kprobe_top(kps[i]) < 0) 821 kps[i]->addr = NULL; 822 mutex_unlock(&kprobe_mutex); 823 824 synchronize_sched(); 825 for (i = 0; i < num; i++) 826 if (kps[i]->addr) 827 __unregister_kprobe_bottom(kps[i]); 828} 829 830static struct notifier_block kprobe_exceptions_nb = { 831 .notifier_call = kprobe_exceptions_notify, 832 .priority = 0x7fffffff /* we need to be notified first */ 833}; 834 835unsigned long __weak arch_deref_entry_point(void *entry) 836{ 837 return (unsigned long)entry; 838} 839 840int __kprobes register_jprobes(struct jprobe **jps, int num) 841{ 842 struct jprobe *jp; 843 int ret = 0, i; 844 845 if (num <= 0) 846 return -EINVAL; 847 for (i = 0; i < num; i++) { 848 unsigned long addr; 849 jp = jps[i]; 850 addr = arch_deref_entry_point(jp->entry); 851 852 if (!kernel_text_address(addr)) 853 ret = -EINVAL; 854 else { 855 /* Todo: Verify probepoint is a function entry point */ 856 jp->kp.pre_handler = setjmp_pre_handler; 857 jp->kp.break_handler = longjmp_break_handler; 858 ret = register_kprobe(&jp->kp); 859 } 860 if (ret < 0) { 861 if (i > 0) 862 unregister_jprobes(jps, i); 863 break; 864 } 865 } 866 return ret; 867} 868 869int __kprobes register_jprobe(struct jprobe *jp) 870{ 871 return register_jprobes(&jp, 1); 872} 873 874void __kprobes unregister_jprobe(struct jprobe *jp) 875{ 876 unregister_jprobes(&jp, 1); 877} 878 879void __kprobes unregister_jprobes(struct jprobe **jps, int num) 880{ 881 int i; 882 883 if (num <= 0) 884 return; 885 mutex_lock(&kprobe_mutex); 886 for (i = 0; i < num; i++) 887 if (__unregister_kprobe_top(&jps[i]->kp) < 0) 888 jps[i]->kp.addr = NULL; 889 mutex_unlock(&kprobe_mutex); 890 891 synchronize_sched(); 892 for (i = 0; i < num; i++) { 893 if (jps[i]->kp.addr) 894 __unregister_kprobe_bottom(&jps[i]->kp); 895 } 896} 897 898#ifdef CONFIG_KRETPROBES 899/* 900 * This kprobe pre_handler is registered with every kretprobe. When probe 901 * hits it will set up the return probe. 902 */ 903static int __kprobes pre_handler_kretprobe(struct kprobe *p, 904 struct pt_regs *regs) 905{ 906 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 907 unsigned long hash, flags = 0; 908 struct kretprobe_instance *ri; 909 910 /*TODO: consider to only swap the RA after the last pre_handler fired */ 911 hash = hash_ptr(current, KPROBE_HASH_BITS); 912 spin_lock_irqsave(&rp->lock, flags); 913 if (!hlist_empty(&rp->free_instances)) { 914 ri = hlist_entry(rp->free_instances.first, 915 struct kretprobe_instance, hlist); 916 hlist_del(&ri->hlist); 917 spin_unlock_irqrestore(&rp->lock, flags); 918 919 ri->rp = rp; 920 ri->task = current; 921 922 if (rp->entry_handler && rp->entry_handler(ri, regs)) { 923 spin_unlock_irqrestore(&rp->lock, flags); 924 return 0; 925 } 926 927 arch_prepare_kretprobe(ri, regs); 928 929 /* XXX(hch): why is there no hlist_move_head? */ 930 INIT_HLIST_NODE(&ri->hlist); 931 kretprobe_table_lock(hash, &flags); 932 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]); 933 kretprobe_table_unlock(hash, &flags); 934 } else { 935 rp->nmissed++; 936 spin_unlock_irqrestore(&rp->lock, flags); 937 } 938 return 0; 939} 940 941int __kprobes register_kretprobe(struct kretprobe *rp) 942{ 943 int ret = 0; 944 struct kretprobe_instance *inst; 945 int i; 946 void *addr; 947 948 if (kretprobe_blacklist_size) { 949 addr = kprobe_addr(&rp->kp); 950 if (!addr) 951 return -EINVAL; 952 953 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 954 if (kretprobe_blacklist[i].addr == addr) 955 return -EINVAL; 956 } 957 } 958 959 rp->kp.pre_handler = pre_handler_kretprobe; 960 rp->kp.post_handler = NULL; 961 rp->kp.fault_handler = NULL; 962 rp->kp.break_handler = NULL; 963 964 /* Pre-allocate memory for max kretprobe instances */ 965 if (rp->maxactive <= 0) { 966#ifdef CONFIG_PREEMPT 967 rp->maxactive = max(10, 2 * NR_CPUS); 968#else 969 rp->maxactive = NR_CPUS; 970#endif 971 } 972 spin_lock_init(&rp->lock); 973 INIT_HLIST_HEAD(&rp->free_instances); 974 for (i = 0; i < rp->maxactive; i++) { 975 inst = kmalloc(sizeof(struct kretprobe_instance) + 976 rp->data_size, GFP_KERNEL); 977 if (inst == NULL) { 978 free_rp_inst(rp); 979 return -ENOMEM; 980 } 981 INIT_HLIST_NODE(&inst->hlist); 982 hlist_add_head(&inst->hlist, &rp->free_instances); 983 } 984 985 rp->nmissed = 0; 986 /* Establish function entry probe point */ 987 ret = register_kprobe(&rp->kp); 988 if (ret != 0) 989 free_rp_inst(rp); 990 return ret; 991} 992 993int __kprobes register_kretprobes(struct kretprobe **rps, int num) 994{ 995 int ret = 0, i; 996 997 if (num <= 0) 998 return -EINVAL; 999 for (i = 0; i < num; i++) { 1000 ret = register_kretprobe(rps[i]); 1001 if (ret < 0) { 1002 if (i > 0) 1003 unregister_kretprobes(rps, i); 1004 break; 1005 } 1006 } 1007 return ret; 1008} 1009 1010void __kprobes unregister_kretprobe(struct kretprobe *rp) 1011{ 1012 unregister_kretprobes(&rp, 1); 1013} 1014 1015void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) 1016{ 1017 int i; 1018 1019 if (num <= 0) 1020 return; 1021 mutex_lock(&kprobe_mutex); 1022 for (i = 0; i < num; i++) 1023 if (__unregister_kprobe_top(&rps[i]->kp) < 0) 1024 rps[i]->kp.addr = NULL; 1025 mutex_unlock(&kprobe_mutex); 1026 1027 synchronize_sched(); 1028 for (i = 0; i < num; i++) { 1029 if (rps[i]->kp.addr) { 1030 __unregister_kprobe_bottom(&rps[i]->kp); 1031 cleanup_rp_inst(rps[i]); 1032 } 1033 } 1034} 1035 1036#else /* CONFIG_KRETPROBES */ 1037int __kprobes register_kretprobe(struct kretprobe *rp) 1038{ 1039 return -ENOSYS; 1040} 1041 1042int __kprobes register_kretprobes(struct kretprobe **rps, int num) 1043{ 1044 return -ENOSYS; 1045} 1046void __kprobes unregister_kretprobe(struct kretprobe *rp) 1047{ 1048} 1049 1050void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) 1051{ 1052} 1053 1054static int __kprobes pre_handler_kretprobe(struct kprobe *p, 1055 struct pt_regs *regs) 1056{ 1057 return 0; 1058} 1059 1060#endif /* CONFIG_KRETPROBES */ 1061 1062/* Set the kprobe gone and remove its instruction buffer. */ 1063static void __kprobes kill_kprobe(struct kprobe *p) 1064{ 1065 struct kprobe *kp; 1066 p->flags |= KPROBE_FLAG_GONE; 1067 if (p->pre_handler == aggr_pre_handler) { 1068 /* 1069 * If this is an aggr_kprobe, we have to list all the 1070 * chained probes and mark them GONE. 1071 */ 1072 list_for_each_entry_rcu(kp, &p->list, list) 1073 kp->flags |= KPROBE_FLAG_GONE; 1074 p->post_handler = NULL; 1075 p->break_handler = NULL; 1076 } 1077 /* 1078 * Here, we can remove insn_slot safely, because no thread calls 1079 * the original probed function (which will be freed soon) any more. 1080 */ 1081 arch_remove_kprobe(p); 1082} 1083 1084/* Module notifier call back, checking kprobes on the module */ 1085static int __kprobes kprobes_module_callback(struct notifier_block *nb, 1086 unsigned long val, void *data) 1087{ 1088 struct module *mod = data; 1089 struct hlist_head *head; 1090 struct hlist_node *node; 1091 struct kprobe *p; 1092 unsigned int i; 1093 int checkcore = (val == MODULE_STATE_GOING); 1094 1095 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) 1096 return NOTIFY_DONE; 1097 1098 /* 1099 * When MODULE_STATE_GOING was notified, both of module .text and 1100 * .init.text sections would be freed. When MODULE_STATE_LIVE was 1101 * notified, only .init.text section would be freed. We need to 1102 * disable kprobes which have been inserted in the sections. 1103 */ 1104 mutex_lock(&kprobe_mutex); 1105 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1106 head = &kprobe_table[i]; 1107 hlist_for_each_entry_rcu(p, node, head, hlist) 1108 if (within_module_init((unsigned long)p->addr, mod) || 1109 (checkcore && 1110 within_module_core((unsigned long)p->addr, mod))) { 1111 /* 1112 * The vaddr this probe is installed will soon 1113 * be vfreed buy not synced to disk. Hence, 1114 * disarming the breakpoint isn't needed. 1115 */ 1116 kill_kprobe(p); 1117 } 1118 } 1119 mutex_unlock(&kprobe_mutex); 1120 return NOTIFY_DONE; 1121} 1122 1123static struct notifier_block kprobe_module_nb = { 1124 .notifier_call = kprobes_module_callback, 1125 .priority = 0 1126}; 1127 1128static int __init init_kprobes(void) 1129{ 1130 int i, err = 0; 1131 unsigned long offset = 0, size = 0; 1132 char *modname, namebuf[128]; 1133 const char *symbol_name; 1134 void *addr; 1135 struct kprobe_blackpoint *kb; 1136 1137 /* FIXME allocate the probe table, currently defined statically */ 1138 /* initialize all list heads */ 1139 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1140 INIT_HLIST_HEAD(&kprobe_table[i]); 1141 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 1142 spin_lock_init(&(kretprobe_table_locks[i].lock)); 1143 } 1144 1145 /* 1146 * Lookup and populate the kprobe_blacklist. 1147 * 1148 * Unlike the kretprobe blacklist, we'll need to determine 1149 * the range of addresses that belong to the said functions, 1150 * since a kprobe need not necessarily be at the beginning 1151 * of a function. 1152 */ 1153 for (kb = kprobe_blacklist; kb->name != NULL; kb++) { 1154 kprobe_lookup_name(kb->name, addr); 1155 if (!addr) 1156 continue; 1157 1158 kb->start_addr = (unsigned long)addr; 1159 symbol_name = kallsyms_lookup(kb->start_addr, 1160 &size, &offset, &modname, namebuf); 1161 if (!symbol_name) 1162 kb->range = 0; 1163 else 1164 kb->range = size; 1165 } 1166 1167 if (kretprobe_blacklist_size) { 1168 /* lookup the function address from its name */ 1169 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 1170 kprobe_lookup_name(kretprobe_blacklist[i].name, 1171 kretprobe_blacklist[i].addr); 1172 if (!kretprobe_blacklist[i].addr) 1173 printk("kretprobe: lookup failed: %s\n", 1174 kretprobe_blacklist[i].name); 1175 } 1176 } 1177 1178 /* By default, kprobes are enabled */ 1179 kprobe_enabled = true; 1180 1181 err = arch_init_kprobes(); 1182 if (!err) 1183 err = register_die_notifier(&kprobe_exceptions_nb); 1184 if (!err) 1185 err = register_module_notifier(&kprobe_module_nb); 1186 1187 kprobes_initialized = (err == 0); 1188 1189 if (!err) 1190 init_test_probes(); 1191 return err; 1192} 1193 1194#ifdef CONFIG_DEBUG_FS 1195static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, 1196 const char *sym, int offset,char *modname) 1197{ 1198 char *kprobe_type; 1199 1200 if (p->pre_handler == pre_handler_kretprobe) 1201 kprobe_type = "r"; 1202 else if (p->pre_handler == setjmp_pre_handler) 1203 kprobe_type = "j"; 1204 else 1205 kprobe_type = "k"; 1206 if (sym) 1207 seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type, 1208 sym, offset, (modname ? modname : " "), 1209 (kprobe_gone(p) ? "[GONE]" : "")); 1210 else 1211 seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr, 1212 (kprobe_gone(p) ? "[GONE]" : "")); 1213} 1214 1215static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) 1216{ 1217 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 1218} 1219 1220static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 1221{ 1222 (*pos)++; 1223 if (*pos >= KPROBE_TABLE_SIZE) 1224 return NULL; 1225 return pos; 1226} 1227 1228static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v) 1229{ 1230 /* Nothing to do */ 1231} 1232 1233static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) 1234{ 1235 struct hlist_head *head; 1236 struct hlist_node *node; 1237 struct kprobe *p, *kp; 1238 const char *sym = NULL; 1239 unsigned int i = *(loff_t *) v; 1240 unsigned long offset = 0; 1241 char *modname, namebuf[128]; 1242 1243 head = &kprobe_table[i]; 1244 preempt_disable(); 1245 hlist_for_each_entry_rcu(p, node, head, hlist) { 1246 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 1247 &offset, &modname, namebuf); 1248 if (p->pre_handler == aggr_pre_handler) { 1249 list_for_each_entry_rcu(kp, &p->list, list) 1250 report_probe(pi, kp, sym, offset, modname); 1251 } else 1252 report_probe(pi, p, sym, offset, modname); 1253 } 1254 preempt_enable(); 1255 return 0; 1256} 1257 1258static struct seq_operations kprobes_seq_ops = { 1259 .start = kprobe_seq_start, 1260 .next = kprobe_seq_next, 1261 .stop = kprobe_seq_stop, 1262 .show = show_kprobe_addr 1263}; 1264 1265static int __kprobes kprobes_open(struct inode *inode, struct file *filp) 1266{ 1267 return seq_open(filp, &kprobes_seq_ops); 1268} 1269 1270static struct file_operations debugfs_kprobes_operations = { 1271 .open = kprobes_open, 1272 .read = seq_read, 1273 .llseek = seq_lseek, 1274 .release = seq_release, 1275}; 1276 1277static void __kprobes enable_all_kprobes(void) 1278{ 1279 struct hlist_head *head; 1280 struct hlist_node *node; 1281 struct kprobe *p; 1282 unsigned int i; 1283 1284 mutex_lock(&kprobe_mutex); 1285 1286 /* If kprobes are already enabled, just return */ 1287 if (kprobe_enabled) 1288 goto already_enabled; 1289 1290 mutex_lock(&text_mutex); 1291 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1292 head = &kprobe_table[i]; 1293 hlist_for_each_entry_rcu(p, node, head, hlist) 1294 if (!kprobe_gone(p)) 1295 arch_arm_kprobe(p); 1296 } 1297 mutex_unlock(&text_mutex); 1298 1299 kprobe_enabled = true; 1300 printk(KERN_INFO "Kprobes globally enabled\n"); 1301 1302already_enabled: 1303 mutex_unlock(&kprobe_mutex); 1304 return; 1305} 1306 1307static void __kprobes disable_all_kprobes(void) 1308{ 1309 struct hlist_head *head; 1310 struct hlist_node *node; 1311 struct kprobe *p; 1312 unsigned int i; 1313 1314 mutex_lock(&kprobe_mutex); 1315 1316 /* If kprobes are already disabled, just return */ 1317 if (!kprobe_enabled) 1318 goto already_disabled; 1319 1320 kprobe_enabled = false; 1321 printk(KERN_INFO "Kprobes globally disabled\n"); 1322 mutex_lock(&text_mutex); 1323 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1324 head = &kprobe_table[i]; 1325 hlist_for_each_entry_rcu(p, node, head, hlist) { 1326 if (!arch_trampoline_kprobe(p) && !kprobe_gone(p)) 1327 arch_disarm_kprobe(p); 1328 } 1329 } 1330 1331 mutex_unlock(&text_mutex); 1332 mutex_unlock(&kprobe_mutex); 1333 /* Allow all currently running kprobes to complete */ 1334 synchronize_sched(); 1335 return; 1336 1337already_disabled: 1338 mutex_unlock(&kprobe_mutex); 1339 return; 1340} 1341 1342/* 1343 * XXX: The debugfs bool file interface doesn't allow for callbacks 1344 * when the bool state is switched. We can reuse that facility when 1345 * available 1346 */ 1347static ssize_t read_enabled_file_bool(struct file *file, 1348 char __user *user_buf, size_t count, loff_t *ppos) 1349{ 1350 char buf[3]; 1351 1352 if (kprobe_enabled) 1353 buf[0] = '1'; 1354 else 1355 buf[0] = '0'; 1356 buf[1] = '\n'; 1357 buf[2] = 0x00; 1358 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 1359} 1360 1361static ssize_t write_enabled_file_bool(struct file *file, 1362 const char __user *user_buf, size_t count, loff_t *ppos) 1363{ 1364 char buf[32]; 1365 int buf_size; 1366 1367 buf_size = min(count, (sizeof(buf)-1)); 1368 if (copy_from_user(buf, user_buf, buf_size)) 1369 return -EFAULT; 1370 1371 switch (buf[0]) { 1372 case 'y': 1373 case 'Y': 1374 case '1': 1375 enable_all_kprobes(); 1376 break; 1377 case 'n': 1378 case 'N': 1379 case '0': 1380 disable_all_kprobes(); 1381 break; 1382 } 1383 1384 return count; 1385} 1386 1387static struct file_operations fops_kp = { 1388 .read = read_enabled_file_bool, 1389 .write = write_enabled_file_bool, 1390}; 1391 1392static int __kprobes debugfs_kprobe_init(void) 1393{ 1394 struct dentry *dir, *file; 1395 unsigned int value = 1; 1396 1397 dir = debugfs_create_dir("kprobes", NULL); 1398 if (!dir) 1399 return -ENOMEM; 1400 1401 file = debugfs_create_file("list", 0444, dir, NULL, 1402 &debugfs_kprobes_operations); 1403 if (!file) { 1404 debugfs_remove(dir); 1405 return -ENOMEM; 1406 } 1407 1408 file = debugfs_create_file("enabled", 0600, dir, 1409 &value, &fops_kp); 1410 if (!file) { 1411 debugfs_remove(dir); 1412 return -ENOMEM; 1413 } 1414 1415 return 0; 1416} 1417 1418late_initcall(debugfs_kprobe_init); 1419#endif /* CONFIG_DEBUG_FS */ 1420 1421module_init(init_kprobes); 1422 1423EXPORT_SYMBOL_GPL(register_kprobe); 1424EXPORT_SYMBOL_GPL(unregister_kprobe); 1425EXPORT_SYMBOL_GPL(register_kprobes); 1426EXPORT_SYMBOL_GPL(unregister_kprobes); 1427EXPORT_SYMBOL_GPL(register_jprobe); 1428EXPORT_SYMBOL_GPL(unregister_jprobe); 1429EXPORT_SYMBOL_GPL(register_jprobes); 1430EXPORT_SYMBOL_GPL(unregister_jprobes); 1431EXPORT_SYMBOL_GPL(jprobe_return); 1432EXPORT_SYMBOL_GPL(register_kretprobe); 1433EXPORT_SYMBOL_GPL(unregister_kretprobe); 1434EXPORT_SYMBOL_GPL(register_kretprobes); 1435EXPORT_SYMBOL_GPL(unregister_kretprobes); 1436