kernel/kprobes.c (Kernel Probes core), revision 6f0f1dd71953d4243c11e490dd49ef24ebaf6c0b
1/* 2 * Kernel Probes (KProbes) 3 * kernel/kprobes.c 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * 19 * Copyright (C) IBM Corporation, 2002, 2004 20 * 21 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel 22 * Probes initial implementation (includes suggestions from 23 * Rusty Russell). 24 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with 25 * hlists and exceptions notifier as suggested by Andi Kleen. 26 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes 27 * interface to access function arguments. 28 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes 29 * exceptions notifier to be first on the priority list. 30 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston 31 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 32 * <prasanna@in.ibm.com> added function-return probes. 33 */ 34#include <linux/kprobes.h> 35#include <linux/hash.h> 36#include <linux/init.h> 37#include <linux/slab.h> 38#include <linux/stddef.h> 39#include <linux/module.h> 40#include <linux/moduleloader.h> 41#include <linux/kallsyms.h> 42#include <linux/freezer.h> 43#include <linux/seq_file.h> 44#include <linux/debugfs.h> 45#include <linux/sysctl.h> 46#include <linux/kdebug.h> 47#include <linux/memory.h> 48#include <linux/ftrace.h> 49#include <linux/cpu.h> 50#include <linux/jump_label.h> 51 52#include <asm-generic/sections.h> 53#include <asm/cacheflush.h> 54#include <asm/errno.h> 55#include <asm/uaccess.h> 56 57#define KPROBE_HASH_BITS 6 58#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) 59 60 61/* 62 * Some oddball architectures like 64bit powerpc have function descriptors 63 * so this must be overridable. 64 */ 65#ifndef kprobe_lookup_name 66#define kprobe_lookup_name(name, addr) \ 67 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) 68#endif 69 70static int kprobes_initialized; 71static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 72static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; 73 74/* NOTE: change this value only with kprobe_mutex held */ 75static bool kprobes_all_disarmed; 76 77/* This protects kprobe_table and optimizing_list */ 78static DEFINE_MUTEX(kprobe_mutex); 79static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 80static struct { 81 spinlock_t lock ____cacheline_aligned_in_smp; 82} kretprobe_table_locks[KPROBE_TABLE_SIZE]; 83 84static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) 85{ 86 return &(kretprobe_table_locks[hash].lock); 87} 88 89/* 90 * Normally, functions that we'd want to prohibit kprobes in, are marked 91 * __kprobes. 
But, there are cases where such functions already belong to 92 * a different section (__sched for preempt_schedule) 93 * 94 * For such cases, we now have a blacklist 95 */ 96static struct kprobe_blackpoint kprobe_blacklist[] = { 97 {"preempt_schedule",}, 98 {"native_get_debugreg",}, 99 {"irq_entries_start",}, 100 {"common_interrupt",}, 101 {"mcount",}, /* mcount can be called from everywhere */ 102 {NULL} /* Terminator */ 103}; 104 105#ifdef __ARCH_WANT_KPROBES_INSN_SLOT 106/* 107 * kprobe->ainsn.insn points to the copy of the instruction to be 108 * single-stepped. x86_64, POWER4 and above have no-exec support and 109 * stepping on the instruction on a vmalloced/kmalloced/data page 110 * is a recipe for disaster 111 */ 112struct kprobe_insn_page { 113 struct list_head list; 114 kprobe_opcode_t *insns; /* Page of instruction slots */ 115 int nused; 116 int ngarbage; 117 char slot_used[]; 118}; 119 120#define KPROBE_INSN_PAGE_SIZE(slots) \ 121 (offsetof(struct kprobe_insn_page, slot_used) + \ 122 (sizeof(char) * (slots))) 123 124struct kprobe_insn_cache { 125 struct list_head pages; /* list of kprobe_insn_page */ 126 size_t insn_size; /* size of instruction slot */ 127 int nr_garbage; 128}; 129 130static int slots_per_page(struct kprobe_insn_cache *c) 131{ 132 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); 133} 134 135enum kprobe_slot_state { 136 SLOT_CLEAN = 0, 137 SLOT_DIRTY = 1, 138 SLOT_USED = 2, 139}; 140 141static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */ 142static struct kprobe_insn_cache kprobe_insn_slots = { 143 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), 144 .insn_size = MAX_INSN_SIZE, 145 .nr_garbage = 0, 146}; 147static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c); 148 149/** 150 * __get_insn_slot() - Find a slot on an executable page for an instruction. 151 * We allocate an executable page if there's no room on existing ones. 152 */ 153static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) 154{ 155 struct kprobe_insn_page *kip; 156 157 retry: 158 list_for_each_entry(kip, &c->pages, list) { 159 if (kip->nused < slots_per_page(c)) { 160 int i; 161 for (i = 0; i < slots_per_page(c); i++) { 162 if (kip->slot_used[i] == SLOT_CLEAN) { 163 kip->slot_used[i] = SLOT_USED; 164 kip->nused++; 165 return kip->insns + (i * c->insn_size); 166 } 167 } 168 /* kip->nused is broken. Fix it. */ 169 kip->nused = slots_per_page(c); 170 WARN_ON(1); 171 } 172 } 173 174 /* If there are any garbage slots, collect it and try again. */ 175 if (c->nr_garbage && collect_garbage_slots(c) == 0) 176 goto retry; 177 178 /* All out of space. Need to allocate a new page. */ 179 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); 180 if (!kip) 181 return NULL; 182 183 /* 184 * Use module_alloc so this page is within +/- 2GB of where the 185 * kernel image and loaded module images reside. This is required 186 * so x86_64 can correctly handle the %rip-relative fixups. 
187 */ 188 kip->insns = module_alloc(PAGE_SIZE); 189 if (!kip->insns) { 190 kfree(kip); 191 return NULL; 192 } 193 INIT_LIST_HEAD(&kip->list); 194 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); 195 kip->slot_used[0] = SLOT_USED; 196 kip->nused = 1; 197 kip->ngarbage = 0; 198 list_add(&kip->list, &c->pages); 199 return kip->insns; 200} 201 202 203kprobe_opcode_t __kprobes *get_insn_slot(void) 204{ 205 kprobe_opcode_t *ret = NULL; 206 207 mutex_lock(&kprobe_insn_mutex); 208 ret = __get_insn_slot(&kprobe_insn_slots); 209 mutex_unlock(&kprobe_insn_mutex); 210 211 return ret; 212} 213 214/* Return 1 if all garbages are collected, otherwise 0. */ 215static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) 216{ 217 kip->slot_used[idx] = SLOT_CLEAN; 218 kip->nused--; 219 if (kip->nused == 0) { 220 /* 221 * Page is no longer in use. Free it unless 222 * it's the last one. We keep the last one 223 * so as not to have to set it up again the 224 * next time somebody inserts a probe. 225 */ 226 if (!list_is_singular(&kip->list)) { 227 list_del(&kip->list); 228 module_free(NULL, kip->insns); 229 kfree(kip); 230 } 231 return 1; 232 } 233 return 0; 234} 235 236static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c) 237{ 238 struct kprobe_insn_page *kip, *next; 239 240 /* Ensure no-one is interrupted on the garbages */ 241 synchronize_sched(); 242 243 list_for_each_entry_safe(kip, next, &c->pages, list) { 244 int i; 245 if (kip->ngarbage == 0) 246 continue; 247 kip->ngarbage = 0; /* we will collect all garbages */ 248 for (i = 0; i < slots_per_page(c); i++) { 249 if (kip->slot_used[i] == SLOT_DIRTY && 250 collect_one_slot(kip, i)) 251 break; 252 } 253 } 254 c->nr_garbage = 0; 255 return 0; 256} 257 258static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, 259 kprobe_opcode_t *slot, int dirty) 260{ 261 struct kprobe_insn_page *kip; 262 263 list_for_each_entry(kip, &c->pages, list) { 264 long idx = ((long)slot - (long)kip->insns) / 265 (c->insn_size * sizeof(kprobe_opcode_t)); 266 if (idx >= 0 && idx < slots_per_page(c)) { 267 WARN_ON(kip->slot_used[idx] != SLOT_USED); 268 if (dirty) { 269 kip->slot_used[idx] = SLOT_DIRTY; 270 kip->ngarbage++; 271 if (++c->nr_garbage > slots_per_page(c)) 272 collect_garbage_slots(c); 273 } else 274 collect_one_slot(kip, idx); 275 return; 276 } 277 } 278 /* Could not free this slot. */ 279 WARN_ON(1); 280} 281 282void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) 283{ 284 mutex_lock(&kprobe_insn_mutex); 285 __free_insn_slot(&kprobe_insn_slots, slot, dirty); 286 mutex_unlock(&kprobe_insn_mutex); 287} 288#ifdef CONFIG_OPTPROBES 289/* For optimized_kprobe buffer */ 290static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */ 291static struct kprobe_insn_cache kprobe_optinsn_slots = { 292 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), 293 /* .insn_size is initialized later */ 294 .nr_garbage = 0, 295}; 296/* Get a slot for optimized_kprobe buffer */ 297kprobe_opcode_t __kprobes *get_optinsn_slot(void) 298{ 299 kprobe_opcode_t *ret = NULL; 300 301 mutex_lock(&kprobe_optinsn_mutex); 302 ret = __get_insn_slot(&kprobe_optinsn_slots); 303 mutex_unlock(&kprobe_optinsn_mutex); 304 305 return ret; 306} 307 308void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty) 309{ 310 mutex_lock(&kprobe_optinsn_mutex); 311 __free_insn_slot(&kprobe_optinsn_slots, slot, dirty); 312 mutex_unlock(&kprobe_optinsn_mutex); 313} 314#endif 315#endif 316 317/* We have preemption disabled.. 
so it is safe to use __ versions */ 318static inline void set_kprobe_instance(struct kprobe *kp) 319{ 320 __get_cpu_var(kprobe_instance) = kp; 321} 322 323static inline void reset_kprobe_instance(void) 324{ 325 __get_cpu_var(kprobe_instance) = NULL; 326} 327 328/* 329 * This routine is called either: 330 * - under the kprobe_mutex - during kprobe_[un]register() 331 * OR 332 * - with preemption disabled - from arch/xxx/kernel/kprobes.c 333 */ 334struct kprobe __kprobes *get_kprobe(void *addr) 335{ 336 struct hlist_head *head; 337 struct hlist_node *node; 338 struct kprobe *p; 339 340 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 341 hlist_for_each_entry_rcu(p, node, head, hlist) { 342 if (p->addr == addr) 343 return p; 344 } 345 346 return NULL; 347} 348 349static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); 350 351/* Return true if the kprobe is an aggregator */ 352static inline int kprobe_aggrprobe(struct kprobe *p) 353{ 354 return p->pre_handler == aggr_pre_handler; 355} 356 357/* 358 * Keep all fields in the kprobe consistent 359 */ 360static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) 361{ 362 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); 363 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); 364} 365 366#ifdef CONFIG_OPTPROBES 367/* NOTE: change this value only with kprobe_mutex held */ 368static bool kprobes_allow_optimization; 369 370/* 371 * Call all pre_handler on the list, but ignores its return value. 372 * This must be called from arch-dep optimized caller. 373 */ 374void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs) 375{ 376 struct kprobe *kp; 377 378 list_for_each_entry_rcu(kp, &p->list, list) { 379 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 380 set_kprobe_instance(kp); 381 kp->pre_handler(kp, regs); 382 } 383 reset_kprobe_instance(); 384 } 385} 386 387/* Return true(!0) if the kprobe is ready for optimization. */ 388static inline int kprobe_optready(struct kprobe *p) 389{ 390 struct optimized_kprobe *op; 391 392 if (kprobe_aggrprobe(p)) { 393 op = container_of(p, struct optimized_kprobe, kp); 394 return arch_prepared_optinsn(&op->optinsn); 395 } 396 397 return 0; 398} 399 400/* 401 * Return an optimized kprobe whose optimizing code replaces 402 * instructions including addr (exclude breakpoint). 403 */ 404static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) 405{ 406 int i; 407 struct kprobe *p = NULL; 408 struct optimized_kprobe *op; 409 410 /* Don't check i == 0, since that is a breakpoint case. 
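 * (i == 0 would mean a kprobe at addr itself, i.e. the breakpoint at the
 * probe point; this lookup only cares about an optimized probe placed
 * before addr whose jump instruction covers addr.)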
*/ 411 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++) 412 p = get_kprobe((void *)(addr - i)); 413 414 if (p && kprobe_optready(p)) { 415 op = container_of(p, struct optimized_kprobe, kp); 416 if (arch_within_optimized_kprobe(op, addr)) 417 return p; 418 } 419 420 return NULL; 421} 422 423/* Optimization staging list, protected by kprobe_mutex */ 424static LIST_HEAD(optimizing_list); 425 426static void kprobe_optimizer(struct work_struct *work); 427static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 428#define OPTIMIZE_DELAY 5 429 430/* Kprobe jump optimizer */ 431static __kprobes void kprobe_optimizer(struct work_struct *work) 432{ 433 struct optimized_kprobe *op, *tmp; 434 435 /* Lock modules while optimizing kprobes */ 436 mutex_lock(&module_mutex); 437 mutex_lock(&kprobe_mutex); 438 if (kprobes_all_disarmed || !kprobes_allow_optimization) 439 goto end; 440 441 /* 442 * Wait for quiesence period to ensure all running interrupts 443 * are done. Because optprobe may modify multiple instructions 444 * there is a chance that Nth instruction is interrupted. In that 445 * case, running interrupt can return to 2nd-Nth byte of jump 446 * instruction. This wait is for avoiding it. 447 */ 448 synchronize_sched(); 449 450 /* 451 * The optimization/unoptimization refers online_cpus via 452 * stop_machine() and cpu-hotplug modifies online_cpus. 453 * And same time, text_mutex will be held in cpu-hotplug and here. 454 * This combination can cause a deadlock (cpu-hotplug try to lock 455 * text_mutex but stop_machine can not be done because online_cpus 456 * has been changed) 457 * To avoid this deadlock, we need to call get_online_cpus() 458 * for preventing cpu-hotplug outside of text_mutex locking. 459 */ 460 get_online_cpus(); 461 mutex_lock(&text_mutex); 462 list_for_each_entry_safe(op, tmp, &optimizing_list, list) { 463 WARN_ON(kprobe_disabled(&op->kp)); 464 if (arch_optimize_kprobe(op) < 0) 465 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 466 list_del_init(&op->list); 467 } 468 mutex_unlock(&text_mutex); 469 put_online_cpus(); 470end: 471 mutex_unlock(&kprobe_mutex); 472 mutex_unlock(&module_mutex); 473} 474 475/* Optimize kprobe if p is ready to be optimized */ 476static __kprobes void optimize_kprobe(struct kprobe *p) 477{ 478 struct optimized_kprobe *op; 479 480 /* Check if the kprobe is disabled or not ready for optimization. */ 481 if (!kprobe_optready(p) || !kprobes_allow_optimization || 482 (kprobe_disabled(p) || kprobes_all_disarmed)) 483 return; 484 485 /* Both of break_handler and post_handler are not supported. */ 486 if (p->break_handler || p->post_handler) 487 return; 488 489 op = container_of(p, struct optimized_kprobe, kp); 490 491 /* Check there is no other kprobes at the optimized instructions */ 492 if (arch_check_optimized_kprobe(op) < 0) 493 return; 494 495 /* Check if it is already optimized. 
*/ 496 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) 497 return; 498 499 op->kp.flags |= KPROBE_FLAG_OPTIMIZED; 500 list_add(&op->list, &optimizing_list); 501 if (!delayed_work_pending(&optimizing_work)) 502 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); 503} 504 505/* Unoptimize a kprobe if p is optimized */ 506static __kprobes void unoptimize_kprobe(struct kprobe *p) 507{ 508 struct optimized_kprobe *op; 509 510 if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) { 511 op = container_of(p, struct optimized_kprobe, kp); 512 if (!list_empty(&op->list)) 513 /* Dequeue from the optimization queue */ 514 list_del_init(&op->list); 515 else 516 /* Replace jump with break */ 517 arch_unoptimize_kprobe(op); 518 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 519 } 520} 521 522/* Remove optimized instructions */ 523static void __kprobes kill_optimized_kprobe(struct kprobe *p) 524{ 525 struct optimized_kprobe *op; 526 527 op = container_of(p, struct optimized_kprobe, kp); 528 if (!list_empty(&op->list)) { 529 /* Dequeue from the optimization queue */ 530 list_del_init(&op->list); 531 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 532 } 533 /* Don't unoptimize, because the target code will be freed. */ 534 arch_remove_optimized_kprobe(op); 535} 536 537/* Try to prepare optimized instructions */ 538static __kprobes void prepare_optimized_kprobe(struct kprobe *p) 539{ 540 struct optimized_kprobe *op; 541 542 op = container_of(p, struct optimized_kprobe, kp); 543 arch_prepare_optimized_kprobe(op); 544} 545 546/* Free optimized instructions and optimized_kprobe */ 547static __kprobes void free_aggr_kprobe(struct kprobe *p) 548{ 549 struct optimized_kprobe *op; 550 551 op = container_of(p, struct optimized_kprobe, kp); 552 arch_remove_optimized_kprobe(op); 553 kfree(op); 554} 555 556/* Allocate new optimized_kprobe and try to prepare optimized instructions */ 557static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 558{ 559 struct optimized_kprobe *op; 560 561 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); 562 if (!op) 563 return NULL; 564 565 INIT_LIST_HEAD(&op->list); 566 op->kp.addr = p->addr; 567 arch_prepare_optimized_kprobe(op); 568 569 return &op->kp; 570} 571 572static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 573 574/* 575 * Prepare an optimized_kprobe and optimize it 576 * NOTE: p must be a normal registered kprobe 577 */ 578static __kprobes void try_to_optimize_kprobe(struct kprobe *p) 579{ 580 struct kprobe *ap; 581 struct optimized_kprobe *op; 582 583 ap = alloc_aggr_kprobe(p); 584 if (!ap) 585 return; 586 587 op = container_of(ap, struct optimized_kprobe, kp); 588 if (!arch_prepared_optinsn(&op->optinsn)) { 589 /* If failed to setup optimizing, fallback to kprobe */ 590 free_aggr_kprobe(ap); 591 return; 592 } 593 594 init_aggr_kprobe(ap, p); 595 optimize_kprobe(ap); 596} 597 598#ifdef CONFIG_SYSCTL 599/* This should be called with kprobe_mutex locked */ 600static void __kprobes optimize_all_kprobes(void) 601{ 602 struct hlist_head *head; 603 struct hlist_node *node; 604 struct kprobe *p; 605 unsigned int i; 606 607 /* If optimization is already allowed, just return */ 608 if (kprobes_allow_optimization) 609 return; 610 611 kprobes_allow_optimization = true; 612 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 613 head = &kprobe_table[i]; 614 hlist_for_each_entry_rcu(p, node, head, hlist) 615 if (!kprobe_disabled(p)) 616 optimize_kprobe(p); 617 } 618 printk(KERN_INFO "Kprobes globally optimized\n"); 619} 620 621/* This should be called with 
kprobe_mutex locked */ 622static void __kprobes unoptimize_all_kprobes(void) 623{ 624 struct hlist_head *head; 625 struct hlist_node *node; 626 struct kprobe *p; 627 unsigned int i; 628 629 /* If optimization is already prohibited, just return */ 630 if (!kprobes_allow_optimization) 631 return; 632 633 kprobes_allow_optimization = false; 634 printk(KERN_INFO "Kprobes globally unoptimized\n"); 635 get_online_cpus(); /* For avoiding text_mutex deadlock */ 636 mutex_lock(&text_mutex); 637 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 638 head = &kprobe_table[i]; 639 hlist_for_each_entry_rcu(p, node, head, hlist) { 640 if (!kprobe_disabled(p)) 641 unoptimize_kprobe(p); 642 } 643 } 644 645 mutex_unlock(&text_mutex); 646 put_online_cpus(); 647 /* Allow all currently running kprobes to complete */ 648 synchronize_sched(); 649} 650 651int sysctl_kprobes_optimization; 652int proc_kprobes_optimization_handler(struct ctl_table *table, int write, 653 void __user *buffer, size_t *length, 654 loff_t *ppos) 655{ 656 int ret; 657 658 mutex_lock(&kprobe_mutex); 659 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; 660 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 661 662 if (sysctl_kprobes_optimization) 663 optimize_all_kprobes(); 664 else 665 unoptimize_all_kprobes(); 666 mutex_unlock(&kprobe_mutex); 667 668 return ret; 669} 670#endif /* CONFIG_SYSCTL */ 671 672static void __kprobes __arm_kprobe(struct kprobe *p) 673{ 674 struct kprobe *_p; 675 676 /* Check collision with other optimized kprobes */ 677 _p = get_optimized_kprobe((unsigned long)p->addr); 678 if (unlikely(_p)) 679 unoptimize_kprobe(_p); /* Fallback to unoptimized kprobe */ 680 681 arch_arm_kprobe(p); 682 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ 683} 684 685static void __kprobes __disarm_kprobe(struct kprobe *p) 686{ 687 struct kprobe *_p; 688 689 unoptimize_kprobe(p); /* Try to unoptimize */ 690 arch_disarm_kprobe(p); 691 692 /* If another kprobe was blocked, optimize it. */ 693 _p = get_optimized_kprobe((unsigned long)p->addr); 694 if (unlikely(_p)) 695 optimize_kprobe(_p); 696} 697 698#else /* !CONFIG_OPTPROBES */ 699 700#define optimize_kprobe(p) do {} while (0) 701#define unoptimize_kprobe(p) do {} while (0) 702#define kill_optimized_kprobe(p) do {} while (0) 703#define prepare_optimized_kprobe(p) do {} while (0) 704#define try_to_optimize_kprobe(p) do {} while (0) 705#define __arm_kprobe(p) arch_arm_kprobe(p) 706#define __disarm_kprobe(p) arch_disarm_kprobe(p) 707 708static __kprobes void free_aggr_kprobe(struct kprobe *p) 709{ 710 kfree(p); 711} 712 713static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 714{ 715 return kzalloc(sizeof(struct kprobe), GFP_KERNEL); 716} 717#endif /* CONFIG_OPTPROBES */ 718 719/* Arm a kprobe with text_mutex */ 720static void __kprobes arm_kprobe(struct kprobe *kp) 721{ 722 /* 723 * Here, since __arm_kprobe() doesn't use stop_machine(), 724 * this doesn't cause deadlock on text_mutex. So, we don't 725 * need get_online_cpus(). 
726 */ 727 mutex_lock(&text_mutex); 728 __arm_kprobe(kp); 729 mutex_unlock(&text_mutex); 730} 731 732/* Disarm a kprobe with text_mutex */ 733static void __kprobes disarm_kprobe(struct kprobe *kp) 734{ 735 get_online_cpus(); /* For avoiding text_mutex deadlock */ 736 mutex_lock(&text_mutex); 737 __disarm_kprobe(kp); 738 mutex_unlock(&text_mutex); 739 put_online_cpus(); 740} 741 742/* 743 * Aggregate handlers for multiple kprobes support - these handlers 744 * take care of invoking the individual kprobe handlers on p->list 745 */ 746static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 747{ 748 struct kprobe *kp; 749 750 list_for_each_entry_rcu(kp, &p->list, list) { 751 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 752 set_kprobe_instance(kp); 753 if (kp->pre_handler(kp, regs)) 754 return 1; 755 } 756 reset_kprobe_instance(); 757 } 758 return 0; 759} 760 761static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 762 unsigned long flags) 763{ 764 struct kprobe *kp; 765 766 list_for_each_entry_rcu(kp, &p->list, list) { 767 if (kp->post_handler && likely(!kprobe_disabled(kp))) { 768 set_kprobe_instance(kp); 769 kp->post_handler(kp, regs, flags); 770 reset_kprobe_instance(); 771 } 772 } 773} 774 775static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, 776 int trapnr) 777{ 778 struct kprobe *cur = __get_cpu_var(kprobe_instance); 779 780 /* 781 * if we faulted "during" the execution of a user specified 782 * probe handler, invoke just that probe's fault handler 783 */ 784 if (cur && cur->fault_handler) { 785 if (cur->fault_handler(cur, regs, trapnr)) 786 return 1; 787 } 788 return 0; 789} 790 791static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs) 792{ 793 struct kprobe *cur = __get_cpu_var(kprobe_instance); 794 int ret = 0; 795 796 if (cur && cur->break_handler) { 797 if (cur->break_handler(cur, regs)) 798 ret = 1; 799 } 800 reset_kprobe_instance(); 801 return ret; 802} 803 804/* Walks the list and increments nmissed count for multiprobe case */ 805void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) 806{ 807 struct kprobe *kp; 808 if (!kprobe_aggrprobe(p)) { 809 p->nmissed++; 810 } else { 811 list_for_each_entry_rcu(kp, &p->list, list) 812 kp->nmissed++; 813 } 814 return; 815} 816 817void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, 818 struct hlist_head *head) 819{ 820 struct kretprobe *rp = ri->rp; 821 822 /* remove rp inst off the rprobe_inst_table */ 823 hlist_del(&ri->hlist); 824 INIT_HLIST_NODE(&ri->hlist); 825 if (likely(rp)) { 826 spin_lock(&rp->lock); 827 hlist_add_head(&ri->hlist, &rp->free_instances); 828 spin_unlock(&rp->lock); 829 } else 830 /* Unregistering */ 831 hlist_add_head(&ri->hlist, head); 832} 833 834void __kprobes kretprobe_hash_lock(struct task_struct *tsk, 835 struct hlist_head **head, unsigned long *flags) 836__acquires(hlist_lock) 837{ 838 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 839 spinlock_t *hlist_lock; 840 841 *head = &kretprobe_inst_table[hash]; 842 hlist_lock = kretprobe_table_lock_ptr(hash); 843 spin_lock_irqsave(hlist_lock, *flags); 844} 845 846static void __kprobes kretprobe_table_lock(unsigned long hash, 847 unsigned long *flags) 848__acquires(hlist_lock) 849{ 850 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 851 spin_lock_irqsave(hlist_lock, *flags); 852} 853 854void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, 855 unsigned long *flags) 856__releases(hlist_lock) 857{ 858 unsigned long hash = 
hash_ptr(tsk, KPROBE_HASH_BITS); 859 spinlock_t *hlist_lock; 860 861 hlist_lock = kretprobe_table_lock_ptr(hash); 862 spin_unlock_irqrestore(hlist_lock, *flags); 863} 864 865static void __kprobes kretprobe_table_unlock(unsigned long hash, 866 unsigned long *flags) 867__releases(hlist_lock) 868{ 869 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 870 spin_unlock_irqrestore(hlist_lock, *flags); 871} 872 873/* 874 * This function is called from finish_task_switch when task tk becomes dead, 875 * so that we can recycle any function-return probe instances associated 876 * with this task. These left over instances represent probed functions 877 * that have been called but will never return. 878 */ 879void __kprobes kprobe_flush_task(struct task_struct *tk) 880{ 881 struct kretprobe_instance *ri; 882 struct hlist_head *head, empty_rp; 883 struct hlist_node *node, *tmp; 884 unsigned long hash, flags = 0; 885 886 if (unlikely(!kprobes_initialized)) 887 /* Early boot. kretprobe_table_locks not yet initialized. */ 888 return; 889 890 hash = hash_ptr(tk, KPROBE_HASH_BITS); 891 head = &kretprobe_inst_table[hash]; 892 kretprobe_table_lock(hash, &flags); 893 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 894 if (ri->task == tk) 895 recycle_rp_inst(ri, &empty_rp); 896 } 897 kretprobe_table_unlock(hash, &flags); 898 INIT_HLIST_HEAD(&empty_rp); 899 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 900 hlist_del(&ri->hlist); 901 kfree(ri); 902 } 903} 904 905static inline void free_rp_inst(struct kretprobe *rp) 906{ 907 struct kretprobe_instance *ri; 908 struct hlist_node *pos, *next; 909 910 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) { 911 hlist_del(&ri->hlist); 912 kfree(ri); 913 } 914} 915 916static void __kprobes cleanup_rp_inst(struct kretprobe *rp) 917{ 918 unsigned long flags, hash; 919 struct kretprobe_instance *ri; 920 struct hlist_node *pos, *next; 921 struct hlist_head *head; 922 923 /* No race here */ 924 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { 925 kretprobe_table_lock(hash, &flags); 926 head = &kretprobe_inst_table[hash]; 927 hlist_for_each_entry_safe(ri, pos, next, head, hlist) { 928 if (ri->rp == rp) 929 ri->rp = NULL; 930 } 931 kretprobe_table_unlock(hash, &flags); 932 } 933 free_rp_inst(rp); 934} 935 936/* 937* Add the new probe to ap->list. Fail if this is the 938* second jprobe at the address - two jprobes can't coexist 939*/ 940static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) 941{ 942 BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); 943 944 if (p->break_handler || p->post_handler) 945 unoptimize_kprobe(ap); /* Fall back to normal kprobe */ 946 947 if (p->break_handler) { 948 if (ap->break_handler) 949 return -EEXIST; 950 list_add_tail_rcu(&p->list, &ap->list); 951 ap->break_handler = aggr_break_handler; 952 } else 953 list_add_rcu(&p->list, &ap->list); 954 if (p->post_handler && !ap->post_handler) 955 ap->post_handler = aggr_post_handler; 956 957 if (kprobe_disabled(ap) && !kprobe_disabled(p)) { 958 ap->flags &= ~KPROBE_FLAG_DISABLED; 959 if (!kprobes_all_disarmed) 960 /* Arm the breakpoint again. */ 961 __arm_kprobe(ap); 962 } 963 return 0; 964} 965 966/* 967 * Fill in the required fields of the "manager kprobe". 
Replace the 968 * earlier kprobe in the hlist with the manager kprobe 969 */ 970static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 971{ 972 /* Copy p's insn slot to ap */ 973 copy_kprobe(p, ap); 974 flush_insn_slot(ap); 975 ap->addr = p->addr; 976 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; 977 ap->pre_handler = aggr_pre_handler; 978 ap->fault_handler = aggr_fault_handler; 979 /* We don't care the kprobe which has gone. */ 980 if (p->post_handler && !kprobe_gone(p)) 981 ap->post_handler = aggr_post_handler; 982 if (p->break_handler && !kprobe_gone(p)) 983 ap->break_handler = aggr_break_handler; 984 985 INIT_LIST_HEAD(&ap->list); 986 INIT_HLIST_NODE(&ap->hlist); 987 988 list_add_rcu(&p->list, &ap->list); 989 hlist_replace_rcu(&p->hlist, &ap->hlist); 990} 991 992/* 993 * This is the second or subsequent kprobe at the address - handle 994 * the intricacies 995 */ 996static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, 997 struct kprobe *p) 998{ 999 int ret = 0; 1000 struct kprobe *ap = orig_p; 1001 1002 if (!kprobe_aggrprobe(orig_p)) { 1003 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ 1004 ap = alloc_aggr_kprobe(orig_p); 1005 if (!ap) 1006 return -ENOMEM; 1007 init_aggr_kprobe(ap, orig_p); 1008 } 1009 1010 if (kprobe_gone(ap)) { 1011 /* 1012 * Attempting to insert new probe at the same location that 1013 * had a probe in the module vaddr area which already 1014 * freed. So, the instruction slot has already been 1015 * released. We need a new slot for the new probe. 1016 */ 1017 ret = arch_prepare_kprobe(ap); 1018 if (ret) 1019 /* 1020 * Even if fail to allocate new slot, don't need to 1021 * free aggr_probe. It will be used next time, or 1022 * freed by unregister_kprobe. 1023 */ 1024 return ret; 1025 1026 /* Prepare optimized instructions if possible. */ 1027 prepare_optimized_kprobe(ap); 1028 1029 /* 1030 * Clear gone flag to prevent allocating new slot again, and 1031 * set disabled flag because it is not armed yet. 1032 */ 1033 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) 1034 | KPROBE_FLAG_DISABLED; 1035 } 1036 1037 /* Copy ap's insn slot to p */ 1038 copy_kprobe(ap, p); 1039 return add_new_kprobe(ap, p); 1040} 1041 1042static int __kprobes in_kprobes_functions(unsigned long addr) 1043{ 1044 struct kprobe_blackpoint *kb; 1045 1046 if (addr >= (unsigned long)__kprobes_text_start && 1047 addr < (unsigned long)__kprobes_text_end) 1048 return -EINVAL; 1049 /* 1050 * If there exists a kprobe_blacklist, verify and 1051 * fail any probe registration in the prohibited area 1052 */ 1053 for (kb = kprobe_blacklist; kb->name != NULL; kb++) { 1054 if (kb->start_addr) { 1055 if (addr >= kb->start_addr && 1056 addr < (kb->start_addr + kb->range)) 1057 return -EINVAL; 1058 } 1059 } 1060 return 0; 1061} 1062 1063/* 1064 * If we have a symbol_name argument, look it up and add the offset field 1065 * to it. This way, we can specify a relative address to a symbol. 1066 */ 1067static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) 1068{ 1069 kprobe_opcode_t *addr = p->addr; 1070 if (p->symbol_name) { 1071 if (addr) 1072 return NULL; 1073 kprobe_lookup_name(p->symbol_name, addr); 1074 } 1075 1076 if (!addr) 1077 return NULL; 1078 return (kprobe_opcode_t *)(((char *)addr) + p->offset); 1079} 1080 1081/* Check passed kprobe is valid and return kprobe in kprobe_table. 
*/ 1082static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) 1083{ 1084 struct kprobe *ap, *list_p; 1085 1086 ap = get_kprobe(p->addr); 1087 if (unlikely(!ap)) 1088 return NULL; 1089 1090 if (p != ap) { 1091 list_for_each_entry_rcu(list_p, &ap->list, list) 1092 if (list_p == p) 1093 /* kprobe p is a valid probe */ 1094 goto valid; 1095 return NULL; 1096 } 1097valid: 1098 return ap; 1099} 1100 1101/* Return error if the kprobe is being re-registered */ 1102static inline int check_kprobe_rereg(struct kprobe *p) 1103{ 1104 int ret = 0; 1105 1106 mutex_lock(&kprobe_mutex); 1107 if (__get_valid_kprobe(p)) 1108 ret = -EINVAL; 1109 mutex_unlock(&kprobe_mutex); 1110 1111 return ret; 1112} 1113 1114int __kprobes register_kprobe(struct kprobe *p) 1115{ 1116 int ret = 0; 1117 struct kprobe *old_p; 1118 struct module *probed_mod; 1119 kprobe_opcode_t *addr; 1120 1121 addr = kprobe_addr(p); 1122 if (!addr) 1123 return -EINVAL; 1124 p->addr = addr; 1125 1126 ret = check_kprobe_rereg(p); 1127 if (ret) 1128 return ret; 1129 1130 jump_label_lock(); 1131 preempt_disable(); 1132 if (!kernel_text_address((unsigned long) p->addr) || 1133 in_kprobes_functions((unsigned long) p->addr) || 1134 ftrace_text_reserved(p->addr, p->addr) || 1135 jump_label_text_reserved(p->addr, p->addr)) 1136 goto fail_with_jump_label; 1137 1138 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ 1139 p->flags &= KPROBE_FLAG_DISABLED; 1140 1141 /* 1142 * Check if are we probing a module. 1143 */ 1144 probed_mod = __module_text_address((unsigned long) p->addr); 1145 if (probed_mod) { 1146 /* 1147 * We must hold a refcount of the probed module while updating 1148 * its code to prohibit unexpected unloading. 1149 */ 1150 if (unlikely(!try_module_get(probed_mod))) 1151 goto fail_with_jump_label; 1152 1153 /* 1154 * If the module freed .init.text, we couldn't insert 1155 * kprobes in there. 1156 */ 1157 if (within_module_init((unsigned long)p->addr, probed_mod) && 1158 probed_mod->state != MODULE_STATE_COMING) { 1159 module_put(probed_mod); 1160 goto fail_with_jump_label; 1161 } 1162 } 1163 preempt_enable(); 1164 jump_label_unlock(); 1165 1166 p->nmissed = 0; 1167 INIT_LIST_HEAD(&p->list); 1168 mutex_lock(&kprobe_mutex); 1169 1170 jump_label_lock(); /* needed to call jump_label_text_reserved() */ 1171 1172 get_online_cpus(); /* For avoiding text_mutex deadlock. */ 1173 mutex_lock(&text_mutex); 1174 1175 old_p = get_kprobe(p->addr); 1176 if (old_p) { 1177 /* Since this may unoptimize old_p, locking text_mutex. 
*/ 1178 ret = register_aggr_kprobe(old_p, p); 1179 goto out; 1180 } 1181 1182 ret = arch_prepare_kprobe(p); 1183 if (ret) 1184 goto out; 1185 1186 INIT_HLIST_NODE(&p->hlist); 1187 hlist_add_head_rcu(&p->hlist, 1188 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1189 1190 if (!kprobes_all_disarmed && !kprobe_disabled(p)) 1191 __arm_kprobe(p); 1192 1193 /* Try to optimize kprobe */ 1194 try_to_optimize_kprobe(p); 1195 1196out: 1197 mutex_unlock(&text_mutex); 1198 put_online_cpus(); 1199 jump_label_unlock(); 1200 mutex_unlock(&kprobe_mutex); 1201 1202 if (probed_mod) 1203 module_put(probed_mod); 1204 1205 return ret; 1206 1207fail_with_jump_label: 1208 preempt_enable(); 1209 jump_label_unlock(); 1210 return -EINVAL; 1211} 1212EXPORT_SYMBOL_GPL(register_kprobe); 1213 1214/* Check if all probes on the aggrprobe are disabled */ 1215static int __kprobes aggr_kprobe_disabled(struct kprobe *ap) 1216{ 1217 struct kprobe *kp; 1218 1219 list_for_each_entry_rcu(kp, &ap->list, list) 1220 if (!kprobe_disabled(kp)) 1221 /* 1222 * There is an active probe on the list. 1223 * We can't disable this ap. 1224 */ 1225 return 0; 1226 1227 return 1; 1228} 1229 1230/* Disable one kprobe: Make sure called under kprobe_mutex is locked */ 1231static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) 1232{ 1233 struct kprobe *orig_p; 1234 1235 /* Get an original kprobe for return */ 1236 orig_p = __get_valid_kprobe(p); 1237 if (unlikely(orig_p == NULL)) 1238 return NULL; 1239 1240 if (!kprobe_disabled(p)) { 1241 /* Disable probe if it is a child probe */ 1242 if (p != orig_p) 1243 p->flags |= KPROBE_FLAG_DISABLED; 1244 1245 /* Try to disarm and disable this/parent probe */ 1246 if (p == orig_p || aggr_kprobe_disabled(orig_p)) { 1247 disarm_kprobe(orig_p); 1248 orig_p->flags |= KPROBE_FLAG_DISABLED; 1249 } 1250 } 1251 1252 return orig_p; 1253} 1254 1255/* 1256 * Unregister a kprobe without a scheduler synchronization. 1257 */ 1258static int __kprobes __unregister_kprobe_top(struct kprobe *p) 1259{ 1260 struct kprobe *ap, *list_p; 1261 1262 /* Disable kprobe. This will disarm it if needed. */ 1263 ap = __disable_kprobe(p); 1264 if (ap == NULL) 1265 return -EINVAL; 1266 1267 if (ap == p) 1268 /* 1269 * This probe is an independent(and non-optimized) kprobe 1270 * (not an aggrprobe). Remove from the hash list. 1271 */ 1272 goto disarmed; 1273 1274 /* Following process expects this probe is an aggrprobe */ 1275 WARN_ON(!kprobe_aggrprobe(ap)); 1276 1277 if (list_is_singular(&ap->list)) 1278 /* This probe is the last child of aggrprobe */ 1279 goto disarmed; 1280 else { 1281 /* If disabling probe has special handlers, update aggrprobe */ 1282 if (p->break_handler && !kprobe_gone(p)) 1283 ap->break_handler = NULL; 1284 if (p->post_handler && !kprobe_gone(p)) { 1285 list_for_each_entry_rcu(list_p, &ap->list, list) { 1286 if ((list_p != p) && (list_p->post_handler)) 1287 goto noclean; 1288 } 1289 ap->post_handler = NULL; 1290 } 1291noclean: 1292 /* 1293 * Remove from the aggrprobe: this path will do nothing in 1294 * __unregister_kprobe_bottom(). 1295 */ 1296 list_del_rcu(&p->list); 1297 if (!kprobe_disabled(ap) && !kprobes_all_disarmed) 1298 /* 1299 * Try to optimize this probe again, because post 1300 * handler may have been changed. 
1301 */ 1302 optimize_kprobe(ap); 1303 } 1304 return 0; 1305 1306disarmed: 1307 hlist_del_rcu(&ap->hlist); 1308 return 0; 1309} 1310 1311static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) 1312{ 1313 struct kprobe *ap; 1314 1315 if (list_empty(&p->list)) 1316 arch_remove_kprobe(p); 1317 else if (list_is_singular(&p->list)) { 1318 /* "p" is the last child of an aggr_kprobe */ 1319 ap = list_entry(p->list.next, struct kprobe, list); 1320 list_del(&p->list); 1321 arch_remove_kprobe(ap); 1322 free_aggr_kprobe(ap); 1323 } 1324} 1325 1326int __kprobes register_kprobes(struct kprobe **kps, int num) 1327{ 1328 int i, ret = 0; 1329 1330 if (num <= 0) 1331 return -EINVAL; 1332 for (i = 0; i < num; i++) { 1333 ret = register_kprobe(kps[i]); 1334 if (ret < 0) { 1335 if (i > 0) 1336 unregister_kprobes(kps, i); 1337 break; 1338 } 1339 } 1340 return ret; 1341} 1342EXPORT_SYMBOL_GPL(register_kprobes); 1343 1344void __kprobes unregister_kprobe(struct kprobe *p) 1345{ 1346 unregister_kprobes(&p, 1); 1347} 1348EXPORT_SYMBOL_GPL(unregister_kprobe); 1349 1350void __kprobes unregister_kprobes(struct kprobe **kps, int num) 1351{ 1352 int i; 1353 1354 if (num <= 0) 1355 return; 1356 mutex_lock(&kprobe_mutex); 1357 for (i = 0; i < num; i++) 1358 if (__unregister_kprobe_top(kps[i]) < 0) 1359 kps[i]->addr = NULL; 1360 mutex_unlock(&kprobe_mutex); 1361 1362 synchronize_sched(); 1363 for (i = 0; i < num; i++) 1364 if (kps[i]->addr) 1365 __unregister_kprobe_bottom(kps[i]); 1366} 1367EXPORT_SYMBOL_GPL(unregister_kprobes); 1368 1369static struct notifier_block kprobe_exceptions_nb = { 1370 .notifier_call = kprobe_exceptions_notify, 1371 .priority = 0x7fffffff /* we need to be notified first */ 1372}; 1373 1374unsigned long __weak arch_deref_entry_point(void *entry) 1375{ 1376 return (unsigned long)entry; 1377} 1378 1379int __kprobes register_jprobes(struct jprobe **jps, int num) 1380{ 1381 struct jprobe *jp; 1382 int ret = 0, i; 1383 1384 if (num <= 0) 1385 return -EINVAL; 1386 for (i = 0; i < num; i++) { 1387 unsigned long addr, offset; 1388 jp = jps[i]; 1389 addr = arch_deref_entry_point(jp->entry); 1390 1391 /* Verify probepoint is a function entry point */ 1392 if (kallsyms_lookup_size_offset(addr, NULL, &offset) && 1393 offset == 0) { 1394 jp->kp.pre_handler = setjmp_pre_handler; 1395 jp->kp.break_handler = longjmp_break_handler; 1396 ret = register_kprobe(&jp->kp); 1397 } else 1398 ret = -EINVAL; 1399 1400 if (ret < 0) { 1401 if (i > 0) 1402 unregister_jprobes(jps, i); 1403 break; 1404 } 1405 } 1406 return ret; 1407} 1408EXPORT_SYMBOL_GPL(register_jprobes); 1409 1410int __kprobes register_jprobe(struct jprobe *jp) 1411{ 1412 return register_jprobes(&jp, 1); 1413} 1414EXPORT_SYMBOL_GPL(register_jprobe); 1415 1416void __kprobes unregister_jprobe(struct jprobe *jp) 1417{ 1418 unregister_jprobes(&jp, 1); 1419} 1420EXPORT_SYMBOL_GPL(unregister_jprobe); 1421 1422void __kprobes unregister_jprobes(struct jprobe **jps, int num) 1423{ 1424 int i; 1425 1426 if (num <= 0) 1427 return; 1428 mutex_lock(&kprobe_mutex); 1429 for (i = 0; i < num; i++) 1430 if (__unregister_kprobe_top(&jps[i]->kp) < 0) 1431 jps[i]->kp.addr = NULL; 1432 mutex_unlock(&kprobe_mutex); 1433 1434 synchronize_sched(); 1435 for (i = 0; i < num; i++) { 1436 if (jps[i]->kp.addr) 1437 __unregister_kprobe_bottom(&jps[i]->kp); 1438 } 1439} 1440EXPORT_SYMBOL_GPL(unregister_jprobes); 1441 1442#ifdef CONFIG_KRETPROBES 1443/* 1444 * This kprobe pre_handler is registered with every kretprobe. When probe 1445 * hits it will set up the return probe. 
1446 */ 1447static int __kprobes pre_handler_kretprobe(struct kprobe *p, 1448 struct pt_regs *regs) 1449{ 1450 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 1451 unsigned long hash, flags = 0; 1452 struct kretprobe_instance *ri; 1453 1454 /*TODO: consider to only swap the RA after the last pre_handler fired */ 1455 hash = hash_ptr(current, KPROBE_HASH_BITS); 1456 spin_lock_irqsave(&rp->lock, flags); 1457 if (!hlist_empty(&rp->free_instances)) { 1458 ri = hlist_entry(rp->free_instances.first, 1459 struct kretprobe_instance, hlist); 1460 hlist_del(&ri->hlist); 1461 spin_unlock_irqrestore(&rp->lock, flags); 1462 1463 ri->rp = rp; 1464 ri->task = current; 1465 1466 if (rp->entry_handler && rp->entry_handler(ri, regs)) 1467 return 0; 1468 1469 arch_prepare_kretprobe(ri, regs); 1470 1471 /* XXX(hch): why is there no hlist_move_head? */ 1472 INIT_HLIST_NODE(&ri->hlist); 1473 kretprobe_table_lock(hash, &flags); 1474 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]); 1475 kretprobe_table_unlock(hash, &flags); 1476 } else { 1477 rp->nmissed++; 1478 spin_unlock_irqrestore(&rp->lock, flags); 1479 } 1480 return 0; 1481} 1482 1483int __kprobes register_kretprobe(struct kretprobe *rp) 1484{ 1485 int ret = 0; 1486 struct kretprobe_instance *inst; 1487 int i; 1488 void *addr; 1489 1490 if (kretprobe_blacklist_size) { 1491 addr = kprobe_addr(&rp->kp); 1492 if (!addr) 1493 return -EINVAL; 1494 1495 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 1496 if (kretprobe_blacklist[i].addr == addr) 1497 return -EINVAL; 1498 } 1499 } 1500 1501 rp->kp.pre_handler = pre_handler_kretprobe; 1502 rp->kp.post_handler = NULL; 1503 rp->kp.fault_handler = NULL; 1504 rp->kp.break_handler = NULL; 1505 1506 /* Pre-allocate memory for max kretprobe instances */ 1507 if (rp->maxactive <= 0) { 1508#ifdef CONFIG_PREEMPT 1509 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); 1510#else 1511 rp->maxactive = num_possible_cpus(); 1512#endif 1513 } 1514 spin_lock_init(&rp->lock); 1515 INIT_HLIST_HEAD(&rp->free_instances); 1516 for (i = 0; i < rp->maxactive; i++) { 1517 inst = kmalloc(sizeof(struct kretprobe_instance) + 1518 rp->data_size, GFP_KERNEL); 1519 if (inst == NULL) { 1520 free_rp_inst(rp); 1521 return -ENOMEM; 1522 } 1523 INIT_HLIST_NODE(&inst->hlist); 1524 hlist_add_head(&inst->hlist, &rp->free_instances); 1525 } 1526 1527 rp->nmissed = 0; 1528 /* Establish function entry probe point */ 1529 ret = register_kprobe(&rp->kp); 1530 if (ret != 0) 1531 free_rp_inst(rp); 1532 return ret; 1533} 1534EXPORT_SYMBOL_GPL(register_kretprobe); 1535 1536int __kprobes register_kretprobes(struct kretprobe **rps, int num) 1537{ 1538 int ret = 0, i; 1539 1540 if (num <= 0) 1541 return -EINVAL; 1542 for (i = 0; i < num; i++) { 1543 ret = register_kretprobe(rps[i]); 1544 if (ret < 0) { 1545 if (i > 0) 1546 unregister_kretprobes(rps, i); 1547 break; 1548 } 1549 } 1550 return ret; 1551} 1552EXPORT_SYMBOL_GPL(register_kretprobes); 1553 1554void __kprobes unregister_kretprobe(struct kretprobe *rp) 1555{ 1556 unregister_kretprobes(&rp, 1); 1557} 1558EXPORT_SYMBOL_GPL(unregister_kretprobe); 1559 1560void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) 1561{ 1562 int i; 1563 1564 if (num <= 0) 1565 return; 1566 mutex_lock(&kprobe_mutex); 1567 for (i = 0; i < num; i++) 1568 if (__unregister_kprobe_top(&rps[i]->kp) < 0) 1569 rps[i]->kp.addr = NULL; 1570 mutex_unlock(&kprobe_mutex); 1571 1572 synchronize_sched(); 1573 for (i = 0; i < num; i++) { 1574 if (rps[i]->kp.addr) { 1575 
__unregister_kprobe_bottom(&rps[i]->kp); 1576 cleanup_rp_inst(rps[i]); 1577 } 1578 } 1579} 1580EXPORT_SYMBOL_GPL(unregister_kretprobes); 1581 1582#else /* CONFIG_KRETPROBES */ 1583int __kprobes register_kretprobe(struct kretprobe *rp) 1584{ 1585 return -ENOSYS; 1586} 1587EXPORT_SYMBOL_GPL(register_kretprobe); 1588 1589int __kprobes register_kretprobes(struct kretprobe **rps, int num) 1590{ 1591 return -ENOSYS; 1592} 1593EXPORT_SYMBOL_GPL(register_kretprobes); 1594 1595void __kprobes unregister_kretprobe(struct kretprobe *rp) 1596{ 1597} 1598EXPORT_SYMBOL_GPL(unregister_kretprobe); 1599 1600void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) 1601{ 1602} 1603EXPORT_SYMBOL_GPL(unregister_kretprobes); 1604 1605static int __kprobes pre_handler_kretprobe(struct kprobe *p, 1606 struct pt_regs *regs) 1607{ 1608 return 0; 1609} 1610 1611#endif /* CONFIG_KRETPROBES */ 1612 1613/* Set the kprobe gone and remove its instruction buffer. */ 1614static void __kprobes kill_kprobe(struct kprobe *p) 1615{ 1616 struct kprobe *kp; 1617 1618 p->flags |= KPROBE_FLAG_GONE; 1619 if (kprobe_aggrprobe(p)) { 1620 /* 1621 * If this is an aggr_kprobe, we have to list all the 1622 * chained probes and mark them GONE. 1623 */ 1624 list_for_each_entry_rcu(kp, &p->list, list) 1625 kp->flags |= KPROBE_FLAG_GONE; 1626 p->post_handler = NULL; 1627 p->break_handler = NULL; 1628 kill_optimized_kprobe(p); 1629 } 1630 /* 1631 * Here, we can remove insn_slot safely, because no thread calls 1632 * the original probed function (which will be freed soon) any more. 1633 */ 1634 arch_remove_kprobe(p); 1635} 1636 1637/* Disable one kprobe */ 1638int __kprobes disable_kprobe(struct kprobe *kp) 1639{ 1640 int ret = 0; 1641 1642 mutex_lock(&kprobe_mutex); 1643 1644 /* Disable this kprobe */ 1645 if (__disable_kprobe(kp) == NULL) 1646 ret = -EINVAL; 1647 1648 mutex_unlock(&kprobe_mutex); 1649 return ret; 1650} 1651EXPORT_SYMBOL_GPL(disable_kprobe); 1652 1653/* Enable one kprobe */ 1654int __kprobes enable_kprobe(struct kprobe *kp) 1655{ 1656 int ret = 0; 1657 struct kprobe *p; 1658 1659 mutex_lock(&kprobe_mutex); 1660 1661 /* Check whether specified probe is valid. */ 1662 p = __get_valid_kprobe(kp); 1663 if (unlikely(p == NULL)) { 1664 ret = -EINVAL; 1665 goto out; 1666 } 1667 1668 if (kprobe_gone(kp)) { 1669 /* This kprobe has gone, we couldn't enable it. */ 1670 ret = -EINVAL; 1671 goto out; 1672 } 1673 1674 if (p != kp) 1675 kp->flags &= ~KPROBE_FLAG_DISABLED; 1676 1677 if (!kprobes_all_disarmed && kprobe_disabled(p)) { 1678 p->flags &= ~KPROBE_FLAG_DISABLED; 1679 arm_kprobe(p); 1680 } 1681out: 1682 mutex_unlock(&kprobe_mutex); 1683 return ret; 1684} 1685EXPORT_SYMBOL_GPL(enable_kprobe); 1686 1687void __kprobes dump_kprobe(struct kprobe *kp) 1688{ 1689 printk(KERN_WARNING "Dumping kprobe:\n"); 1690 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n", 1691 kp->symbol_name, kp->addr, kp->offset); 1692} 1693 1694/* Module notifier call back, checking kprobes on the module */ 1695static int __kprobes kprobes_module_callback(struct notifier_block *nb, 1696 unsigned long val, void *data) 1697{ 1698 struct module *mod = data; 1699 struct hlist_head *head; 1700 struct hlist_node *node; 1701 struct kprobe *p; 1702 unsigned int i; 1703 int checkcore = (val == MODULE_STATE_GOING); 1704 1705 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) 1706 return NOTIFY_DONE; 1707 1708 /* 1709 * When MODULE_STATE_GOING was notified, both of module .text and 1710 * .init.text sections would be freed. 
When MODULE_STATE_LIVE was 1711 * notified, only .init.text section would be freed. We need to 1712 * disable kprobes which have been inserted in the sections. 1713 */ 1714 mutex_lock(&kprobe_mutex); 1715 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1716 head = &kprobe_table[i]; 1717 hlist_for_each_entry_rcu(p, node, head, hlist) 1718 if (within_module_init((unsigned long)p->addr, mod) || 1719 (checkcore && 1720 within_module_core((unsigned long)p->addr, mod))) { 1721 /* 1722 * The vaddr this probe is installed will soon 1723 * be vfreed buy not synced to disk. Hence, 1724 * disarming the breakpoint isn't needed. 1725 */ 1726 kill_kprobe(p); 1727 } 1728 } 1729 mutex_unlock(&kprobe_mutex); 1730 return NOTIFY_DONE; 1731} 1732 1733static struct notifier_block kprobe_module_nb = { 1734 .notifier_call = kprobes_module_callback, 1735 .priority = 0 1736}; 1737 1738static int __init init_kprobes(void) 1739{ 1740 int i, err = 0; 1741 unsigned long offset = 0, size = 0; 1742 char *modname, namebuf[128]; 1743 const char *symbol_name; 1744 void *addr; 1745 struct kprobe_blackpoint *kb; 1746 1747 /* FIXME allocate the probe table, currently defined statically */ 1748 /* initialize all list heads */ 1749 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1750 INIT_HLIST_HEAD(&kprobe_table[i]); 1751 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 1752 spin_lock_init(&(kretprobe_table_locks[i].lock)); 1753 } 1754 1755 /* 1756 * Lookup and populate the kprobe_blacklist. 1757 * 1758 * Unlike the kretprobe blacklist, we'll need to determine 1759 * the range of addresses that belong to the said functions, 1760 * since a kprobe need not necessarily be at the beginning 1761 * of a function. 1762 */ 1763 for (kb = kprobe_blacklist; kb->name != NULL; kb++) { 1764 kprobe_lookup_name(kb->name, addr); 1765 if (!addr) 1766 continue; 1767 1768 kb->start_addr = (unsigned long)addr; 1769 symbol_name = kallsyms_lookup(kb->start_addr, 1770 &size, &offset, &modname, namebuf); 1771 if (!symbol_name) 1772 kb->range = 0; 1773 else 1774 kb->range = size; 1775 } 1776 1777 if (kretprobe_blacklist_size) { 1778 /* lookup the function address from its name */ 1779 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 1780 kprobe_lookup_name(kretprobe_blacklist[i].name, 1781 kretprobe_blacklist[i].addr); 1782 if (!kretprobe_blacklist[i].addr) 1783 printk("kretprobe: lookup failed: %s\n", 1784 kretprobe_blacklist[i].name); 1785 } 1786 } 1787 1788#if defined(CONFIG_OPTPROBES) 1789#if defined(__ARCH_WANT_KPROBES_INSN_SLOT) 1790 /* Init kprobe_optinsn_slots */ 1791 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; 1792#endif 1793 /* By default, kprobes can be optimized */ 1794 kprobes_allow_optimization = true; 1795#endif 1796 1797 /* By default, kprobes are armed */ 1798 kprobes_all_disarmed = false; 1799 1800 err = arch_init_kprobes(); 1801 if (!err) 1802 err = register_die_notifier(&kprobe_exceptions_nb); 1803 if (!err) 1804 err = register_module_notifier(&kprobe_module_nb); 1805 1806 kprobes_initialized = (err == 0); 1807 1808 if (!err) 1809 init_test_probes(); 1810 return err; 1811} 1812 1813#ifdef CONFIG_DEBUG_FS 1814static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, 1815 const char *sym, int offset, char *modname, struct kprobe *pp) 1816{ 1817 char *kprobe_type; 1818 1819 if (p->pre_handler == pre_handler_kretprobe) 1820 kprobe_type = "r"; 1821 else if (p->pre_handler == setjmp_pre_handler) 1822 kprobe_type = "j"; 1823 else 1824 kprobe_type = "k"; 1825 1826 if (sym) 1827 seq_printf(pi, "%p %s %s+0x%x %s ", 1828 p->addr, 
kprobe_type, sym, offset, 1829 (modname ? modname : " ")); 1830 else 1831 seq_printf(pi, "%p %s %p ", 1832 p->addr, kprobe_type, p->addr); 1833 1834 if (!pp) 1835 pp = p; 1836 seq_printf(pi, "%s%s%s\n", 1837 (kprobe_gone(p) ? "[GONE]" : ""), 1838 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), 1839 (kprobe_optimized(pp) ? "[OPTIMIZED]" : "")); 1840} 1841 1842static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) 1843{ 1844 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 1845} 1846 1847static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 1848{ 1849 (*pos)++; 1850 if (*pos >= KPROBE_TABLE_SIZE) 1851 return NULL; 1852 return pos; 1853} 1854 1855static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v) 1856{ 1857 /* Nothing to do */ 1858} 1859 1860static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) 1861{ 1862 struct hlist_head *head; 1863 struct hlist_node *node; 1864 struct kprobe *p, *kp; 1865 const char *sym = NULL; 1866 unsigned int i = *(loff_t *) v; 1867 unsigned long offset = 0; 1868 char *modname, namebuf[128]; 1869 1870 head = &kprobe_table[i]; 1871 preempt_disable(); 1872 hlist_for_each_entry_rcu(p, node, head, hlist) { 1873 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 1874 &offset, &modname, namebuf); 1875 if (kprobe_aggrprobe(p)) { 1876 list_for_each_entry_rcu(kp, &p->list, list) 1877 report_probe(pi, kp, sym, offset, modname, p); 1878 } else 1879 report_probe(pi, p, sym, offset, modname, NULL); 1880 } 1881 preempt_enable(); 1882 return 0; 1883} 1884 1885static const struct seq_operations kprobes_seq_ops = { 1886 .start = kprobe_seq_start, 1887 .next = kprobe_seq_next, 1888 .stop = kprobe_seq_stop, 1889 .show = show_kprobe_addr 1890}; 1891 1892static int __kprobes kprobes_open(struct inode *inode, struct file *filp) 1893{ 1894 return seq_open(filp, &kprobes_seq_ops); 1895} 1896 1897static const struct file_operations debugfs_kprobes_operations = { 1898 .open = kprobes_open, 1899 .read = seq_read, 1900 .llseek = seq_lseek, 1901 .release = seq_release, 1902}; 1903 1904static void __kprobes arm_all_kprobes(void) 1905{ 1906 struct hlist_head *head; 1907 struct hlist_node *node; 1908 struct kprobe *p; 1909 unsigned int i; 1910 1911 mutex_lock(&kprobe_mutex); 1912 1913 /* If kprobes are armed, just return */ 1914 if (!kprobes_all_disarmed) 1915 goto already_enabled; 1916 1917 /* Arming kprobes doesn't optimize kprobe itself */ 1918 mutex_lock(&text_mutex); 1919 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1920 head = &kprobe_table[i]; 1921 hlist_for_each_entry_rcu(p, node, head, hlist) 1922 if (!kprobe_disabled(p)) 1923 __arm_kprobe(p); 1924 } 1925 mutex_unlock(&text_mutex); 1926 1927 kprobes_all_disarmed = false; 1928 printk(KERN_INFO "Kprobes globally enabled\n"); 1929 1930already_enabled: 1931 mutex_unlock(&kprobe_mutex); 1932 return; 1933} 1934 1935static void __kprobes disarm_all_kprobes(void) 1936{ 1937 struct hlist_head *head; 1938 struct hlist_node *node; 1939 struct kprobe *p; 1940 unsigned int i; 1941 1942 mutex_lock(&kprobe_mutex); 1943 1944 /* If kprobes are already disarmed, just return */ 1945 if (kprobes_all_disarmed) 1946 goto already_disabled; 1947 1948 kprobes_all_disarmed = true; 1949 printk(KERN_INFO "Kprobes globally disabled\n"); 1950 1951 /* 1952 * Here we call get_online_cpus() for avoiding text_mutex deadlock, 1953 * because disarming may also unoptimize kprobes. 
1954 */ 1955 get_online_cpus(); 1956 mutex_lock(&text_mutex); 1957 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1958 head = &kprobe_table[i]; 1959 hlist_for_each_entry_rcu(p, node, head, hlist) { 1960 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) 1961 __disarm_kprobe(p); 1962 } 1963 } 1964 1965 mutex_unlock(&text_mutex); 1966 put_online_cpus(); 1967 mutex_unlock(&kprobe_mutex); 1968 /* Allow all currently running kprobes to complete */ 1969 synchronize_sched(); 1970 return; 1971 1972already_disabled: 1973 mutex_unlock(&kprobe_mutex); 1974 return; 1975} 1976 1977/* 1978 * XXX: The debugfs bool file interface doesn't allow for callbacks 1979 * when the bool state is switched. We can reuse that facility when 1980 * available 1981 */ 1982static ssize_t read_enabled_file_bool(struct file *file, 1983 char __user *user_buf, size_t count, loff_t *ppos) 1984{ 1985 char buf[3]; 1986 1987 if (!kprobes_all_disarmed) 1988 buf[0] = '1'; 1989 else 1990 buf[0] = '0'; 1991 buf[1] = '\n'; 1992 buf[2] = 0x00; 1993 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 1994} 1995 1996static ssize_t write_enabled_file_bool(struct file *file, 1997 const char __user *user_buf, size_t count, loff_t *ppos) 1998{ 1999 char buf[32]; 2000 int buf_size; 2001 2002 buf_size = min(count, (sizeof(buf)-1)); 2003 if (copy_from_user(buf, user_buf, buf_size)) 2004 return -EFAULT; 2005 2006 switch (buf[0]) { 2007 case 'y': 2008 case 'Y': 2009 case '1': 2010 arm_all_kprobes(); 2011 break; 2012 case 'n': 2013 case 'N': 2014 case '0': 2015 disarm_all_kprobes(); 2016 break; 2017 } 2018 2019 return count; 2020} 2021 2022static const struct file_operations fops_kp = { 2023 .read = read_enabled_file_bool, 2024 .write = write_enabled_file_bool, 2025 .llseek = default_llseek, 2026}; 2027 2028static int __kprobes debugfs_kprobe_init(void) 2029{ 2030 struct dentry *dir, *file; 2031 unsigned int value = 1; 2032 2033 dir = debugfs_create_dir("kprobes", NULL); 2034 if (!dir) 2035 return -ENOMEM; 2036 2037 file = debugfs_create_file("list", 0444, dir, NULL, 2038 &debugfs_kprobes_operations); 2039 if (!file) { 2040 debugfs_remove(dir); 2041 return -ENOMEM; 2042 } 2043 2044 file = debugfs_create_file("enabled", 0600, dir, 2045 &value, &fops_kp); 2046 if (!file) { 2047 debugfs_remove(dir); 2048 return -ENOMEM; 2049 } 2050 2051 return 0; 2052} 2053 2054late_initcall(debugfs_kprobe_init); 2055#endif /* CONFIG_DEBUG_FS */ 2056 2057module_init(init_kprobes); 2058 2059/* defined in arch/.../kernel/kprobes.c */ 2060EXPORT_SYMBOL_GPL(jprobe_return); 2061
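Below is a minimal usage sketch of the registration API this file implements (register_kprobe()/unregister_kprobe()), modelled on the samples/kprobes/kprobe_example.c module shipped with kernels of this era. It is not part of kprobes.c: the probed symbol "do_fork" and the handler names are illustrative assumptions, and the handler signatures simply follow the struct kprobe callbacks used by the aggregate handlers above (pre_handler, post_handler, fault_handler).

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Probe placed by symbol name; kprobe_addr() resolves it at register time. */
static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* illustrative target symbol */
};

/* Called just before the probed instruction is executed. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "pre_handler: addr = %p\n", p->addr);
	return 0;
}

/* Called after the probed instruction has been single-stepped. */
static void handler_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	printk(KERN_INFO "post_handler: addr = %p\n", p->addr);
}

/* Called if a fault is hit while a handler is running. */
static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
	printk(KERN_INFO "fault_handler: trap %d at %p\n", trapnr, p->addr);
	return 0;	/* let the kernel handle the fault */
}

static int __init kprobe_example_init(void)
{
	int ret;

	kp.pre_handler = handler_pre;
	kp.post_handler = handler_post;
	kp.fault_handler = handler_fault;

	ret = register_kprobe(&kp);
	if (ret < 0) {
		printk(KERN_INFO "register_kprobe failed, returned %d\n", ret);
		return ret;
	}
	printk(KERN_INFO "planted kprobe at %p\n", kp.addr);
	return 0;
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
	printk(KERN_INFO "kprobe at %p unregistered\n", kp.addr);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");

The unregister call in the module exit path ends up in __unregister_kprobe_top()/__unregister_kprobe_bottom() above, so the probe is disarmed and its instruction slot freed before the module owning the handlers goes away.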