/* trace.c revision 42748aca73359c83881556c8b28f6cda4f1c143b */
1#include "config.h" 2 3#include <asm/unistd.h> 4#include <sys/types.h> 5#include <sys/wait.h> 6#include <assert.h> 7#include <errno.h> 8#include <error.h> 9#include <stdio.h> 10#include <stdlib.h> 11#include <string.h> 12#include <unistd.h> 13 14#ifdef HAVE_LIBSELINUX 15# include <selinux/selinux.h> 16#endif 17 18#include "ptrace.h" 19#include "common.h" 20#include "breakpoint.h" 21#include "proc.h" 22#include "linux-gnu/trace.h" 23 24/* If the system headers did not provide the constants, hard-code the normal 25 values. */ 26#ifndef PTRACE_EVENT_FORK 27 28#define PTRACE_OLDSETOPTIONS 21 29#define PTRACE_SETOPTIONS 0x4200 30#define PTRACE_GETEVENTMSG 0x4201 31 32/* options set using PTRACE_SETOPTIONS */ 33#define PTRACE_O_TRACESYSGOOD 0x00000001 34#define PTRACE_O_TRACEFORK 0x00000002 35#define PTRACE_O_TRACEVFORK 0x00000004 36#define PTRACE_O_TRACECLONE 0x00000008 37#define PTRACE_O_TRACEEXEC 0x00000010 38#define PTRACE_O_TRACEVFORKDONE 0x00000020 39#define PTRACE_O_TRACEEXIT 0x00000040 40 41/* Wait extended result codes for the above trace options. */ 42#define PTRACE_EVENT_FORK 1 43#define PTRACE_EVENT_VFORK 2 44#define PTRACE_EVENT_CLONE 3 45#define PTRACE_EVENT_EXEC 4 46#define PTRACE_EVENT_VFORK_DONE 5 47#define PTRACE_EVENT_EXIT 6 48 49#endif /* PTRACE_EVENT_FORK */ 50 51#ifdef ARCH_HAVE_UMOVELONG 52extern int arch_umovelong (Process *, void *, long *, arg_type_info *); 53int 54umovelong (Process *proc, void *addr, long *result, arg_type_info *info) { 55 return arch_umovelong (proc, addr, result, info); 56} 57#else 58/* Read a single long from the process's memory address 'addr' */ 59int 60umovelong (Process *proc, void *addr, long *result, arg_type_info *info) { 61 long pointed_to; 62 63 errno = 0; 64 pointed_to = ptrace (PTRACE_PEEKTEXT, proc->pid, addr, 0); 65 if (pointed_to == -1 && errno) 66 return -errno; 67 68 *result = pointed_to; 69 if (info) { 70 switch(info->type) { 71 case ARGTYPE_INT: 72 *result &= 0x00000000ffffffffUL; 73 default: 74 break; 75 }; 76 } 77 return 0; 78} 79#endif 80 81void 82trace_fail_warning(pid_t pid) 83{ 84 /* This was adapted from GDB. */ 85#ifdef HAVE_LIBSELINUX 86 static int checked = 0; 87 if (checked) 88 return; 89 checked = 1; 90 91 /* -1 is returned for errors, 0 if it has no effect, 1 if 92 * PTRACE_ATTACH is forbidden. */ 93 if (security_get_boolean_active("deny_ptrace") == 1) 94 fprintf(stderr, 95"The SELinux boolean 'deny_ptrace' is enabled, which may prevent ltrace from\n" 96"tracing other processes. You can disable this process attach protection by\n" 97"issuing 'setsebool deny_ptrace=0' in the superuser context.\n"); 98#endif /* HAVE_LIBSELINUX */ 99} 100 101void 102trace_me(void) 103{ 104 debug(DEBUG_PROCESS, "trace_me: pid=%d", getpid()); 105 if (ptrace(PTRACE_TRACEME, 0, 1, 0) < 0) { 106 perror("PTRACE_TRACEME"); 107 trace_fail_warning(getpid()); 108 exit(1); 109 } 110} 111 112/* There's a (hopefully) brief period of time after the child process 113 * forks when we can't trace it yet. Here we wait for kernel to 114 * prepare the process. */ 115int 116wait_for_proc(pid_t pid) 117{ 118 /* man ptrace: PTRACE_ATTACH attaches to the process specified 119 in pid. The child is sent a SIGSTOP, but will not 120 necessarily have stopped by the completion of this call; 121 use wait() to wait for the child to stop. 
/* There's a (hopefully) brief period of time after the child process
 * forks when we can't trace it yet.  Here we wait for the kernel to
 * prepare the process.  */
int
wait_for_proc(pid_t pid)
{
	/* man ptrace: PTRACE_ATTACH attaches to the process specified
	   in pid.  The child is sent a SIGSTOP, but will not
	   necessarily have stopped by the completion of this call;
	   use wait() to wait for the child to stop.  */
	if (waitpid(pid, NULL, __WALL) != pid) {
		perror ("trace_pid: waitpid");
		return -1;
	}

	return 0;
}

int
trace_pid(pid_t pid)
{
	debug(DEBUG_PROCESS, "trace_pid: pid=%d", pid);
	/* This shouldn't emit error messages, as there are legitimate
	 * reasons that the PID can't be attached: like it may have
	 * already ended.  */
	if (ptrace(PTRACE_ATTACH, pid, 1, 0) < 0)
		return -1;

	return wait_for_proc(pid);
}

void
trace_set_options(struct Process *proc)
{
	if (proc->tracesysgood & 0x80)
		return;

	pid_t pid = proc->pid;
	debug(DEBUG_PROCESS, "trace_set_options: pid=%d", pid);

	long options = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
		PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE |
		PTRACE_O_TRACEEXEC;
	if (ptrace(PTRACE_SETOPTIONS, pid, 0, options) < 0 &&
	    ptrace(PTRACE_OLDSETOPTIONS, pid, 0, options) < 0) {
		perror("PTRACE_SETOPTIONS");
		return;
	}
	proc->tracesysgood |= 0x80;
}

void
untrace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "untrace_pid: pid=%d", pid);
	ptrace(PTRACE_DETACH, pid, 1, 0);
}

void
continue_after_signal(pid_t pid, int signum) {
	debug(DEBUG_PROCESS, "continue_after_signal: pid=%d, signum=%d", pid, signum);
	ptrace(PTRACE_SYSCALL, pid, 0, signum);
}

static enum ecb_status
event_for_pid(Event * event, void * data)
{
	if (event->proc != NULL && event->proc->pid == (pid_t)(uintptr_t)data)
		return ecb_yield;
	return ecb_cont;
}

static int
have_events_for(pid_t pid)
{
	return each_qd_event(event_for_pid, (void *)(uintptr_t)pid) != NULL;
}

void
continue_process(pid_t pid)
{
	debug(DEBUG_PROCESS, "continue_process: pid=%d", pid);

	/* Only really continue the process if there are no events in
	   the queue for this process.  Otherwise just wait for the
	   other events to arrive.  */
	if (!have_events_for(pid))
		/* We always trace syscalls to control fork(),
		 * clone(), execve()...  */
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
	else
		debug(DEBUG_PROCESS,
		      "putting off the continue, events in queue.");
}
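
/* Illustration only, compiled out (not part of ltrace): what resuming a
 * tracee with PTRACE_SYSCALL buys us.  With PTRACE_O_TRACESYSGOOD set (see
 * trace_set_options above), syscall stops are reported as SIGTRAP | 0x80,
 * so they can be told apart from genuine SIGTRAPs.  A bare-bones wait loop
 * might look like this; the helper name is hypothetical and real code would
 * also forward other signals to the tracee.  */
#if 0
static void
example_syscall_stop_loop(pid_t pid)
{
	int status;
	while (waitpid(pid, &status, __WALL) == pid && !WIFEXITED(status)) {
		if (WIFSTOPPED(status)
		    && WSTOPSIG(status) == (SIGTRAP | 0x80)) {
			/* Syscall entry or exit; inspect registers here.  */
		}
		/* Resume until the next syscall boundary or signal.  */
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
	}
}
#endif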
static struct pid_task *
get_task_info(struct pid_set * pids, pid_t pid)
{
	assert(pid != 0);
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid == pid)
			return &pids->tasks[i];

	return NULL;
}

static struct pid_task *
add_task_info(struct pid_set * pids, pid_t pid)
{
	if (pids->count == pids->alloc) {
		size_t ns = (2 * pids->alloc) ?: 4;
		struct pid_task * n = realloc(pids->tasks,
					      sizeof(*pids->tasks) * ns);
		if (n == NULL)
			return NULL;
		pids->tasks = n;
		pids->alloc = ns;
	}
	struct pid_task * task_info = &pids->tasks[pids->count++];
	memset(task_info, 0, sizeof(*task_info));
	task_info->pid = pid;
	return task_info;
}

static enum callback_status
task_stopped(struct Process *task, void *data)
{
	enum process_status st = process_status(task->pid);
	if (data != NULL)
		*(enum process_status *)data = st;

	/* If the task is already stopped, don't worry about it.
	 * Likewise if it managed to become a zombie or terminate in
	 * the meantime.  This can happen when the whole thread group
	 * is terminating.  */
	switch (st) {
	case ps_invalid:
	case ps_tracing_stop:
	case ps_zombie:
		return CBS_CONT;
	case ps_sleeping:
	case ps_stop:
	case ps_other:
		return CBS_STOP;
	}

	abort ();
}

/* The task is blocked if it's stopped, or if it's a vfork parent.  */
static enum callback_status
task_blocked(struct Process *task, void *data)
{
	struct pid_set * pids = data;
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info != NULL
	    && task_info->vforked)
		return CBS_CONT;

	return task_stopped(task, NULL);
}

static Event *process_vfork_on_event(struct event_handler *super, Event *event);

static enum callback_status
task_vforked(struct Process *task, void *data)
{
	if (task->event_handler != NULL
	    && task->event_handler->on_event == &process_vfork_on_event)
		return CBS_STOP;
	return CBS_CONT;
}

static int
is_vfork_parent(struct Process *task)
{
	return each_task(task->leader, NULL, &task_vforked, NULL) != NULL;
}

static enum callback_status
send_sigstop(struct Process *task, void *data)
{
	Process * leader = task->leader;
	struct pid_set * pids = data;

	/* Look for a pre-existing task record, or add a new one.  */
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info == NULL)
		task_info = add_task_info(pids, task->pid);
	if (task_info == NULL) {
		perror("send_sigstop: add_task_info");
		destroy_event_handler(leader);
		/* Signal failure upwards.  */
		return CBS_STOP;
	}

	/* This task still has not been attached to.  It should be
	   stopped by the kernel.  */
	if (task->state == STATE_BEING_CREATED)
		return CBS_CONT;

	/* Don't bother sending SIGSTOP if we are already stopped, or
	 * if we sent the SIGSTOP already, which happens when we are
	 * handling "onexit" and inherited the handler from breakpoint
	 * re-enablement.  */
	enum process_status st;
	if (task_stopped(task, &st) == CBS_CONT)
		return CBS_CONT;
	if (task_info->sigstopped) {
		if (!task_info->delivered)
			return CBS_CONT;
		task_info->delivered = 0;
	}

	/* Also don't attempt to stop the process if it's the parent of a
	 * vforked process.  We set up the event handler specially to hint
	 * us.  In that case the parent is in the D state, which we use to
	 * weed out unnecessary looping.  */
	if (st == ps_sleeping
	    && is_vfork_parent (task)) {
		task_info->vforked = 1;
		return CBS_CONT;
	}

	if (task_kill(task->pid, SIGSTOP) >= 0) {
		debug(DEBUG_PROCESS, "send SIGSTOP to %d", task->pid);
		task_info->sigstopped = 1;
	} else
		fprintf(stderr,
			"Warning: couldn't send SIGSTOP to %d\n", task->pid);

	return CBS_CONT;
}
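
/* Illustration only, compiled out (not part of ltrace): send_sigstop()
 * above is driven by each_task(), which walks ltrace's own task list.
 * Outside of that bookkeeping, one common way to find every task of a
 * thread group is to list /proc/<pid>/task and signal each tid with
 * tgkill(), which delivers a signal to one specific thread of the group.
 * A rough sketch with hypothetical naming: */
#if 0
#include <dirent.h>
#include <sys/syscall.h>

static void
example_sigstop_thread_group(pid_t leader)
{
	char path[64];
	snprintf(path, sizeof(path), "/proc/%d/task", leader);
	DIR *dir = opendir(path);
	if (dir == NULL)
		return;
	struct dirent *d;
	while ((d = readdir(dir)) != NULL) {
		pid_t tid = atoi(d->d_name);	/* skips "." and ".." */
		if (tid > 0)
			syscall(SYS_tgkill, leader, tid, SIGSTOP);
	}
	closedir(dir);
}
#endif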
/* On certain kernels, detaching right after a singlestep causes the
   tracee to be killed with a SIGTRAP (even though the singlestep
   was properly caught by waitpid).  The ugly workaround is to put a
   breakpoint where IP points and let the process continue.  After
   this the breakpoint can be retracted and the process detached.  */
static void
ugly_workaround(Process * proc)
{
	void * ip = get_instruction_pointer(proc);
	struct breakpoint *sbp = dict_find_entry(proc->leader->breakpoints, ip);
	if (sbp != NULL)
		enable_breakpoint(proc, sbp);
	else
		insert_breakpoint(proc, ip, NULL);
	ptrace(PTRACE_CONT, proc->pid, 0, 0);
}

static void
process_stopping_done(struct process_stopping_handler * self, Process * leader)
{
	debug(DEBUG_PROCESS, "process stopping done %d",
	      self->task_enabling_breakpoint->pid);
	size_t i;
	if (!self->exiting) {
		for (i = 0; i < self->pids.count; ++i)
			if (self->pids.tasks[i].pid != 0
			    && (self->pids.tasks[i].delivered
				|| self->pids.tasks[i].sysret))
				continue_process(self->pids.tasks[i].pid);
		continue_process(self->task_enabling_breakpoint->pid);
		destroy_event_handler(leader);
	}

	if (self->exiting) {
	ugly_workaround:
		self->state = psh_ugly_workaround;
		ugly_workaround(self->task_enabling_breakpoint);
	} else {
		switch ((self->ugly_workaround_p)(self)) {
		case CBS_FAIL:
			/* xxx handle me */
		case CBS_STOP:
			break;
		case CBS_CONT:
			goto ugly_workaround;
		}
	}
}

/* Before we detach, we need to make sure that the task's IP is on the
 * edge of an instruction.  So for tasks that have a breakpoint event
 * in the queue, we adjust the instruction pointer, just like
 * continue_after_breakpoint does.  */
static enum ecb_status
undo_breakpoint(Event * event, void * data)
{
	if (event != NULL
	    && event->proc->leader == data
	    && event->type == EVENT_BREAKPOINT)
		set_instruction_pointer(event->proc, event->e_un.brk_addr);
	return ecb_cont;
}

static enum callback_status
untrace_task(struct Process *task, void *data)
{
	if (task != data)
		untrace_pid(task->pid);
	return CBS_CONT;
}

static enum callback_status
remove_task(struct Process *task, void *data)
{
	/* Don't untrace the leader just yet.  */
	if (task != data)
		remove_process(task);
	return CBS_CONT;
}

static void
detach_process(Process * leader)
{
	each_qd_event(&undo_breakpoint, leader);
	disable_all_breakpoints(leader);

	/* Now untrace the process, if it was attached to by -p.  */
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL)
			continue;
		if (proc->leader == leader) {
			each_task(leader, NULL, &untrace_task, NULL);
			break;
		}
	}
	each_task(leader, NULL, &remove_task, leader);
	destroy_event_handler(leader);
	remove_task(leader, NULL);
}

static void
handle_stopping_event(struct pid_task * task_info, Event ** eventp)
{
	/* Mark all events, so that we know whom to SIGCONT later.  */
	if (task_info != NULL)
		task_info->got_event = 1;

	Event * event = *eventp;

	/* In every state, sink SIGSTOP events for tasks that it was
	 * sent to.  */
	if (task_info != NULL
	    && event->type == EVENT_SIGNAL
	    && event->e_un.signum == SIGSTOP) {
		debug(DEBUG_PROCESS, "SIGSTOP delivered to %d", task_info->pid);
		if (task_info->sigstopped
		    && !task_info->delivered) {
			task_info->delivered = 1;
			*eventp = NULL; // sink the event
		} else
			fprintf(stderr, "suspicious: %d got SIGSTOP, but "
				"sigstopped=%d and delivered=%d\n",
				task_info->pid, task_info->sigstopped,
				task_info->delivered);
	}
}

/* Some SIGSTOPs may have not been delivered to their respective tasks
 * yet.  They are still in the queue.  If we have seen an event for
 * that process, continue it, so that the SIGSTOP can be delivered and
 * caught by ltrace.  We don't mind that the process is after a
 * breakpoint (and therefore potentially doesn't have an aligned IP),
 * because the signal will be delivered without the process actually
 * starting.  */
static void
continue_for_sigstop_delivery(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i) {
		if (pids->tasks[i].pid != 0
		    && pids->tasks[i].sigstopped
		    && !pids->tasks[i].delivered
		    && pids->tasks[i].got_event) {
			debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
			      pids->tasks[i].pid);
			ptrace(PTRACE_SYSCALL, pids->tasks[i].pid, 0, 0);
		}
	}
}

static int
event_exit_p(Event * event)
{
	return event != NULL && (event->type == EVENT_EXIT
				 || event->type == EVENT_EXIT_SIGNAL);
}

static int
event_exit_or_none_p(Event * event)
{
	return event == NULL || event_exit_p(event)
		|| event->type == EVENT_NONE;
}

static int
await_sigstop_delivery(struct pid_set * pids, struct pid_task * task_info,
		       Event * event)
{
	/* If we still didn't get our SIGSTOP, continue the process
	 * and carry on.  */
	if (event != NULL && !event_exit_or_none_p(event)
	    && task_info != NULL && task_info->sigstopped) {
		debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
		      task_info->pid);
		/* We should get the signal the first thing
		 * after this, so it should be OK to continue
		 * even if we are over a breakpoint.  */
		ptrace(PTRACE_SYSCALL, task_info->pid, 0, 0);

	} else {
		/* If all SIGSTOPs were delivered, uninstall the
		 * handler and continue everyone.  */
		/* XXX I suspect that we should check tasks that are
		 * still around.  As things are now, there should be a
		 * race between waiting for everyone to stop and one
		 * of the tasks exiting.  */
		int all_clear = 1;
		size_t i;
		for (i = 0; i < pids->count; ++i)
			if (pids->tasks[i].pid != 0
			    && pids->tasks[i].sigstopped
			    && !pids->tasks[i].delivered) {
				all_clear = 0;
				break;
			}
		return all_clear;
	}

	return 0;
}

static int
all_stops_accountable(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid != 0
		    && !pids->tasks[i].got_event
		    && !have_events_for(pids->tasks[i].pid))
			return 0;
	return 1;
}

/* The protocol is: 0 for success, negative for failure, positive if
 * default singlestep is to be used.  */
int arch_atomic_singlestep(struct Process *proc, struct breakpoint *sbp,
			   int (*add_cb)(void *addr, void *data),
			   void *add_cb_data);

#ifndef ARCH_HAVE_ATOMIC_SINGLESTEP
int
arch_atomic_singlestep(struct Process *proc, struct breakpoint *sbp,
		       int (*add_cb)(void *addr, void *data),
		       void *add_cb_data)
{
	return 1;
}
#endif

static Event *process_stopping_on_event(struct event_handler *super,
					Event *event);

static void
remove_atomic_breakpoints(struct Process *proc)
{
	struct process_stopping_handler *self = (void *)proc->event_handler;
	assert(self->super.on_event == process_stopping_on_event);

	int ct = sizeof(self->atomic_skip_bp_addrs)
		/ sizeof(*self->atomic_skip_bp_addrs);
	int i;
	for (i = 0; i < ct; ++i)
		if (self->atomic_skip_bp_addrs[i] != 0) {
			delete_breakpoint(proc->leader,
					  self->atomic_skip_bp_addrs[i]);
			self->atomic_skip_bp_addrs[i] = 0;
		}
}

static void
atomic_singlestep_bp_on_hit(struct breakpoint *bp, struct Process *proc)
{
	remove_atomic_breakpoints(proc);
}

static int
atomic_singlestep_add_bp(void *addr, void *data)
{
	struct process_stopping_handler *self = data;
	struct Process *proc = self->task_enabling_breakpoint;

	int ct = sizeof(self->atomic_skip_bp_addrs)
		/ sizeof(*self->atomic_skip_bp_addrs);
	int i;
	for (i = 0; i < ct; ++i)
		if (self->atomic_skip_bp_addrs[i] == 0) {
			self->atomic_skip_bp_addrs[i] = addr;
			static struct bp_callbacks cbs = {
				.on_hit = atomic_singlestep_bp_on_hit,
			};
			struct breakpoint *bp
				= insert_breakpoint(proc->leader, addr, NULL);
			breakpoint_set_callbacks(bp, &cbs);
			return 0;
		}

	assert(!"Too many atomic singlestep breakpoints!");
	abort();
}

static int
singlestep(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;

	int status = arch_atomic_singlestep(self->task_enabling_breakpoint,
					    self->breakpoint_being_enabled,
					    &atomic_singlestep_add_bp, self);

	/* Propagate failure and success.  */
	if (status <= 0)
		return status;

	/* Otherwise do the default action: singlestep.  */
	debug(1, "PTRACE_SINGLESTEP");
	if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0)) {
		perror("PTRACE_SINGLESTEP");
		return -1;
	}
	return 0;
}
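
/* Illustration only, compiled out (not part of ltrace): the single-threaded
 * core of what the stopping handler below orchestrates across a whole
 * thread group -- step over a breakpoint and put it back.  Error handling
 * is omitted and the helper name is hypothetical.  */
#if 0
static void
example_step_over_breakpoint(Process *proc, struct breakpoint *bp)
{
	int status;

	disable_breakpoint(proc, bp);		/* restore the original insn */
	ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0);
	waitpid(proc->pid, &status, __WALL);	/* wait for the step trap */
	enable_breakpoint(proc, bp);		/* re-insert the trap insn */
	ptrace(PTRACE_CONT, proc->pid, 0, 0);
}
#endif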
static void
post_singlestep(struct process_stopping_handler *self,
		struct Event **eventp)
{
	continue_for_sigstop_delivery(&self->pids);

	if (*eventp != NULL && (*eventp)->type == EVENT_BREAKPOINT)
		*eventp = NULL; // handled

	struct Process *proc = self->task_enabling_breakpoint;

	remove_atomic_breakpoints(proc);
	self->breakpoint_being_enabled = NULL;
}

static void
singlestep_error(struct process_stopping_handler *self)
{
	struct Process *teb = self->task_enabling_breakpoint;
	struct breakpoint *sbp = self->breakpoint_being_enabled;
	fprintf(stderr, "%d couldn't continue when handling %s (%p) at %p\n",
		teb->pid, sbp->libsym != NULL ? sbp->libsym->name : NULL,
		sbp->addr, get_instruction_pointer(teb));
	delete_breakpoint(teb->leader, sbp->addr);
}

static void
pt_continue(struct process_stopping_handler *self)
{
	struct Process *teb = self->task_enabling_breakpoint;
	debug(1, "PTRACE_CONT");
	ptrace(PTRACE_CONT, teb->pid, 0, 0);
}

static void
pt_singlestep(struct process_stopping_handler *self)
{
	if (singlestep(self) < 0)
		singlestep_error(self);
}

static void
disable_and(struct process_stopping_handler *self,
	    void (*do_this)(struct process_stopping_handler *self))
{
	struct Process *teb = self->task_enabling_breakpoint;
	debug(DEBUG_PROCESS, "all stopped, now singlestep/cont %d", teb->pid);
	if (self->breakpoint_being_enabled->enabled)
		disable_breakpoint(teb, self->breakpoint_being_enabled);
	(do_this)(self);
	self->state = psh_singlestep;
}

void
linux_ptrace_disable_and_singlestep(struct process_stopping_handler *self)
{
	disable_and(self, &pt_singlestep);
}

void
linux_ptrace_disable_and_continue(struct process_stopping_handler *self)
{
	disable_and(self, &pt_continue);
}

/* This event handler is installed when we are in the process of
 * stopping the whole thread group to do the pointer re-enablement for
 * one of the threads.  We pump all events to the queue for later
 * processing while we wait for all the threads to stop.  When this
 * happens, we let the re-enablement thread do its PTRACE_SINGLESTEP,
 * re-enable the breakpoint, and continue everyone.  */
static Event *
process_stopping_on_event(struct event_handler *super, Event *event)
{
	struct process_stopping_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;
	Process * teb = self->task_enabling_breakpoint;

	debug(DEBUG_PROCESS,
	      "process_stopping_on_event: pid %d; event type %d; state %d",
	      task->pid, event->type, self->state);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	if (task_info == NULL)
		fprintf(stderr, "new task??? %d\n", task->pid);
	handle_stopping_event(task_info, &event);

	int state = self->state;
	int event_to_queue = !event_exit_or_none_p(event);

	/* Deactivate the entry if the task exits.  */
	if (event_exit_p(event) && task_info != NULL)
		task_info->pid = 0;

	/* Always handle sysrets.  Whether a sysret occurred and which
	 * syscall it returns from may need to be determined based on the
	 * process stack, so we need to keep that in sync with reality.  Note
	 * that we don't continue the process after the sysret is
	 * handled.  See continue_after_syscall.  */
	if (event != NULL && event->type == EVENT_SYSRET) {
		debug(1, "%d LT_EV_SYSRET", event->proc->pid);
		event_to_queue = 0;
		task_info->sysret = 1;
	}

	switch (state) {
	case psh_stopping:
		/* If everyone is stopped, singlestep.  */
		if (each_task(leader, NULL, &task_blocked,
			      &self->pids) == NULL) {
			(self->on_all_stopped)(self);
			state = self->state;
		}
		break;

	case psh_singlestep:
		/* In the singlestep state, a breakpoint event signifies
		 * that we have now stepped, and can re-enable the
		 * breakpoint.  */
		if (event != NULL && task == teb) {

			/* If this was caused by a real breakpoint, as
			 * opposed to a singlestep, assume that it's
			 * an artificial breakpoint installed for some
			 * reason for the re-enablement.  In that case
			 * handle it.  */
			if (event->type == EVENT_BREAKPOINT) {
				target_address_t ip
					= get_instruction_pointer(task);
				struct breakpoint *other
					= address2bpstruct(leader, ip);
				if (other != NULL)
					breakpoint_on_hit(other, task);
			}

			/* If we got SIGNAL instead of BREAKPOINT,
			 * then this is not singlestep at all.  */
			if (event->type == EVENT_SIGNAL) {
			do_singlestep:
				if (singlestep(self) < 0) {
					singlestep_error(self);
					post_singlestep(self, &event);
					goto psh_sinking;
				}
				break;
			} else {
				switch ((self->keep_stepping_p)(self)) {
				case CBS_FAIL:
					/* XXX handle me */
				case CBS_STOP:
					break;
				case CBS_CONT:
					/* Sink singlestep event.  */
					if (event->type == EVENT_BREAKPOINT)
						event = NULL;
					goto do_singlestep;
				}
			}

			/* Re-enable the breakpoint that we are
			 * stepping over.  */
			struct breakpoint *sbp = self->breakpoint_being_enabled;
			if (sbp->enabled)
				enable_breakpoint(teb, sbp);

			post_singlestep(self, &event);
			goto psh_sinking;
		}
		break;

	psh_sinking:
		state = self->state = psh_sinking;
	case psh_sinking:
		if (await_sigstop_delivery(&self->pids, task_info, event))
			process_stopping_done(self, leader);
		break;

	case psh_ugly_workaround:
		if (event == NULL)
			break;
		if (event->type == EVENT_BREAKPOINT) {
			undo_breakpoint(event, leader);
			if (task == teb)
				self->task_enabling_breakpoint = NULL;
		}
		if (self->task_enabling_breakpoint == NULL
		    && all_stops_accountable(&self->pids)) {
			undo_breakpoint(event, leader);
			detach_process(leader);
			event = NULL; // handled
		}
	}

	if (event != NULL && event_to_queue) {
		enque_event(event);
		event = NULL; // sink the event
	}

	return event;
}

static void
process_stopping_destroy(struct event_handler *super)
{
	struct process_stopping_handler * self = (void *)super;
	free(self->pids.tasks);
}

static enum callback_status
no(struct process_stopping_handler *self)
{
	return CBS_STOP;
}
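
/* Illustration only, compiled out (not part of ltrace): the event-handler
 * idiom used by process_stopping_handler here and by the exiting and vfork
 * handlers further down.  The handler struct embeds struct event_handler as
 * its first member, on_event casts back to the derived type, and returning
 * NULL sinks the event instead of passing it on.  Names below are
 * hypothetical.  */
#if 0
struct example_handler
{
	struct event_handler super;	/* must come first */
	int events_seen;
};

static Event *
example_on_event(struct event_handler *super, Event *event)
{
	struct example_handler *self = (void *)super;
	self->events_seen++;
	return event;		/* pass the event on; NULL would sink it */
}

static int
example_install(struct Process *proc)
{
	struct example_handler *handler = calloc(sizeof(*handler), 1);
	if (handler == NULL)
		return -1;
	handler->super.on_event = example_on_event;
	install_event_handler(proc->leader, &handler->super);
	return 0;
}
#endif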
int
process_install_stopping_handler(struct Process *proc, struct breakpoint *sbp,
				 void (*as)(struct process_stopping_handler *),
				 enum callback_status (*ks)
					 (struct process_stopping_handler *),
				 enum callback_status (*uw)
					 (struct process_stopping_handler *))
{
	debug(DEBUG_FUNCTION,
	      "process_install_stopping_handler: pid=%d", proc->pid);

	struct process_stopping_handler *handler = calloc(sizeof(*handler), 1);
	if (handler == NULL)
		return -1;

	if (as == NULL)
		as = &linux_ptrace_disable_and_singlestep;
	if (ks == NULL)
		ks = &no;
	if (uw == NULL)
		uw = &no;

	handler->super.on_event = process_stopping_on_event;
	handler->super.destroy = process_stopping_destroy;
	handler->task_enabling_breakpoint = proc;
	handler->breakpoint_being_enabled = sbp;
	handler->on_all_stopped = as;
	handler->keep_stepping_p = ks;
	handler->ugly_workaround_p = uw;

	install_event_handler(proc->leader, &handler->super);

	if (each_task(proc->leader, NULL, &send_sigstop,
		      &handler->pids) != NULL) {
		destroy_event_handler(proc);
		return -1;
	}

	/* And deliver the first fake event, in case all the
	 * conditions are already fulfilled.  */
	Event ev = {
		.type = EVENT_NONE,
		.proc = proc,
	};
	process_stopping_on_event(&handler->super, &ev);

	return 0;
}

void
continue_after_breakpoint(Process *proc, struct breakpoint *sbp)
{
	debug(DEBUG_PROCESS,
	      "continue_after_breakpoint: pid=%d, addr=%p",
	      proc->pid, sbp->addr);

	set_instruction_pointer(proc, sbp->addr);

	if (sbp->enabled == 0) {
		continue_process(proc->pid);
	} else {
#if defined __sparc__ || defined __ia64__ || defined __mips__
		/* we don't want to singlestep here */
		continue_process(proc->pid);
#else
		if (process_install_stopping_handler
		    (proc, sbp, NULL, NULL, NULL) < 0) {
			perror("process_stopping_handler_create");
			/* Carry on not bothering to re-enable.  */
			continue_process(proc->pid);
		}
#endif
	}
}

/**
 * Ltrace exit.  When we are about to exit, we have to go through all
 * the processes, stop them all, remove all the breakpoints, and then
 * detach from the processes that we attached to using -p.  If we left
 * the other tasks running, they might hit stray return breakpoints and
 * produce artifacts, so we better stop everyone, even if it's a bit
 * of extra work.
 */
struct ltrace_exiting_handler
{
	struct event_handler super;
	struct pid_set pids;
};

static Event *
ltrace_exiting_on_event(struct event_handler *super, Event *event)
{
	struct ltrace_exiting_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;

	debug(DEBUG_PROCESS,
	      "ltrace_exiting_on_event: pid %d; event type %d",
	      task->pid, event->type);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	handle_stopping_event(task_info, &event);

	if (event != NULL && event->type == EVENT_BREAKPOINT)
		undo_breakpoint(event, leader);

	if (await_sigstop_delivery(&self->pids, task_info, event)
	    && all_stops_accountable(&self->pids))
		detach_process(leader);

	/* Sink all non-exit events.  We are about to exit, so we
	 * don't bother with queuing them.  */
	if (event_exit_or_none_p(event))
		return event;

	return NULL;
}

static void
ltrace_exiting_destroy(struct event_handler *super)
{
	struct ltrace_exiting_handler * self = (void *)super;
	free(self->pids.tasks);
}

static int
ltrace_exiting_install_handler(Process * proc)
{
	/* Only install to the leader.  */
	if (proc->leader != proc)
		return 0;

	/* Perhaps we are already installed, if the user passed
	 * several -p options that are tasks of one process.  */
	if (proc->event_handler != NULL
	    && proc->event_handler->on_event == &ltrace_exiting_on_event)
		return 0;

	/* If the stopping handler is already present, let it do the
	 * work.  */
	if (proc->event_handler != NULL) {
		assert(proc->event_handler->on_event
		       == &process_stopping_on_event);
		struct process_stopping_handler * other
			= (void *)proc->event_handler;
		other->exiting = 1;
		return 0;
	}

	struct ltrace_exiting_handler * handler
		= calloc(sizeof(*handler), 1);
	if (handler == NULL) {
		perror("malloc exiting handler");
	fatal:
		/* XXXXXXXXXXXXXXXXXXX fixme */
		return -1;
	}

	handler->super.on_event = ltrace_exiting_on_event;
	handler->super.destroy = ltrace_exiting_destroy;
	install_event_handler(proc->leader, &handler->super);

	if (each_task(proc->leader, NULL, &send_sigstop,
		      &handler->pids) != NULL)
		goto fatal;

	return 0;
}

/*
 * When the traced process vforks, it's suspended until the child
 * process calls _exit or exec*.  In the meantime, the two share the
 * address space.
 *
 * The child process should only ever call _exit or exec*, but we
 * can't count on that (it's not the role of ltrace to police, but to
 * observe).  In any case, we will _at least_ have to deal with
 * removal of the vfork return breakpoint (which we have to smuggle
 * back in, so that the parent can see it, too), and introduction of
 * the exec* return breakpoint.  Since we already have both breakpoint
 * actions to deal with, we might as well support it all.
 *
 * The gist is that we pretend that the child is in a thread group
 * with its parent, and handle it as a multi-threaded case, with the
 * exception that we know that the parent is blocked, and don't
 * attempt to stop it.  When the child execs, we undo the setup.
 */

struct process_vfork_handler
{
	struct event_handler super;
	void * bp_addr;
};

static Event *
process_vfork_on_event(struct event_handler *super, Event *event)
{
	debug(DEBUG_PROCESS,
	      "process_vfork_on_event: pid %d; event type %d",
	      event->proc->pid, event->type);

	struct process_vfork_handler * self = (void *)super;
	struct breakpoint *sbp;
	assert(self != NULL);

	switch (event->type) {
	case EVENT_BREAKPOINT:
		/* Remember the vfork return breakpoint.  */
		if (self->bp_addr == NULL)
			self->bp_addr = event->e_un.brk_addr;
		break;

	case EVENT_EXIT:
	case EVENT_EXIT_SIGNAL:
	case EVENT_EXEC:
		/* Smuggle back in the vfork return breakpoint, so
		 * that our parent can trip over it once again.  */
		if (self->bp_addr != NULL) {
			sbp = dict_find_entry(event->proc->leader->breakpoints,
					      self->bp_addr);
			if (sbp != NULL)
				insert_breakpoint(event->proc->parent,
						  self->bp_addr, sbp->libsym);
		}

		continue_process(event->proc->parent->pid);

		/* Remove the leader that we artificially set up
		 * earlier.  */
		change_process_leader(event->proc, event->proc);
		destroy_event_handler(event->proc);

	default:
		;
	}

	return event;
}

void
continue_after_vfork(Process * proc)
{
	debug(DEBUG_PROCESS, "continue_after_vfork: pid=%d", proc->pid);
	struct process_vfork_handler * handler = calloc(sizeof(*handler), 1);
	if (handler == NULL) {
		perror("malloc vfork handler");
		/* Carry on not bothering to treat the process as
		 * necessary.  */
		continue_process(proc->parent->pid);
		return;
	}

	/* We must set up a custom event handler, so that we see
	 * exec/exit events for the task itself.  */
	handler->super.on_event = process_vfork_on_event;
	install_event_handler(proc, &handler->super);

	/* Make sure that the child is the sole thread.  */
	assert(proc->leader == proc);
	assert(proc->next == NULL || proc->next->leader != proc);

	/* Make sure that the child's parent is properly set up.  */
	assert(proc->parent != NULL);
	assert(proc->parent->leader != NULL);

	change_process_leader(proc, proc->parent->leader);
}

static int
is_mid_stopping(Process *proc)
{
	return proc != NULL
		&& proc->event_handler != NULL
		&& proc->event_handler->on_event == &process_stopping_on_event;
}

void
continue_after_syscall(Process * proc, int sysnum, int ret_p)
{
	/* Don't continue if we are mid-stopping.  */
	if (ret_p && (is_mid_stopping(proc) || is_mid_stopping(proc->leader))) {
		debug(DEBUG_PROCESS,
		      "continue_after_syscall: don't continue %d",
		      proc->pid);
		return;
	}
	continue_process(proc->pid);
}

/* If ltrace gets SIGINT, the processes directly or indirectly run by
 * ltrace get it too.  We just have to wait long enough for the signal
 * to be delivered and the process terminated, which we notice and
 * exit ltrace, too.  So there's not much we need to do there.  We
 * want to keep tracing those processes as usual, in case they just
 * SIG_IGN the SIGINT to do their shutdown etc.
 *
 * For processes run in the background, we want to install an exit
 * handler that stops all the threads, removes all breakpoints, and
 * detaches.
 */
void
os_ltrace_exiting(void)
{
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL || proc->leader == NULL)
			continue;
		if (ltrace_exiting_install_handler(proc->leader) < 0)
			fprintf(stderr,
				"Couldn't install exiting handler for %d.\n",
				proc->pid);
	}
}

int
os_ltrace_exiting_sighandler(void)
{
	extern int linux_in_waitpid;
	if (linux_in_waitpid) {
		os_ltrace_exiting();
		return 1;
	}
	return 0;
}

size_t
umovebytes(Process *proc, void *addr, void *laddr, size_t len) {

	union {
		long a;
		char c[sizeof(long)];
	} a;
	int started = 0;
	size_t offset = 0, bytes_read = 0;

	while (offset < len) {
		/* Reset errno so that a word that legitimately reads as -1
		 * isn't mistaken for an error, as in umovelong above.  */
		errno = 0;
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		if (a.a == -1 && errno) {
			if (started && errno == EIO)
				return bytes_read;
			else
				return -1;
		}
		started = 1;

		if (len - offset >= sizeof(long)) {
			memcpy(laddr + offset, &a.c[0], sizeof(long));
			bytes_read += sizeof(long);
		}
		else {
			memcpy(laddr + offset, &a.c[0], len - offset);
			bytes_read += (len - offset);
		}
		offset += sizeof(long);
	}

	return bytes_read;
}

/* Read a series of bytes starting at the process's memory address
   'addr' and continuing until a NUL ('\0') is seen or 'len' bytes
   have been read.  */
int
umovestr(Process *proc, void *addr, int len, void *laddr) {
	union {
		long a;
		char c[sizeof(long)];
	} a;
	unsigned i;
	int offset = 0;

	while (offset < len) {
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		for (i = 0; i < sizeof(long); i++) {
			if (a.c[i] && offset + (signed)i < len) {
				*(char *)(laddr + offset + i) = a.c[i];
			} else {
				*(char *)(laddr + offset + i) = '\0';
				return 0;
			}
		}
		offset += sizeof(long);
	}
	*(char *)(laddr + offset) = '\0';
	return 0;
}
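
/* Illustration only, compiled out (not part of ltrace): typical use of the
 * two readers above -- umovebytes() for fixed-size data and umovestr() for
 * NUL-terminated strings living in the traced process.  The buffer size and
 * the assumption that `remote` points at a C string are the caller's
 * responsibility; names are hypothetical.  */
#if 0
static void
example_read_remote(Process *proc, void *remote)
{
	long word;
	char buf[256];

	/* Read one word's worth of raw bytes.  */
	if (umovebytes(proc, remote, &word, sizeof(word)) != sizeof(word))
		return;

	/* Read up to 256 bytes, stopping at the first NUL.  */
	umovestr(proc, remote, sizeof(buf), buf);
	printf("%s\n", buf);
}
#endif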