trace.c revision 9d29b3e7d2d7f7ccb1891dc56b03e91a0319cf42
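/* This file implements ltrace's ptrace-based tracing layer:
 * attaching to and detaching from tasks, reading tracee memory, and
 * the event handlers that stop whole thread groups for breakpoint
 * re-enablement, for ltrace exit, and for vfork handling.  */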
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "ptrace.h"
#include <asm/unistd.h>
#include <assert.h>

#include "common.h"

/* If the system headers did not provide the constants, hard-code the
   normal values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_OLDSETOPTIONS 21
#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

#ifdef ARCH_HAVE_UMOVELONG
extern int arch_umovelong (Process *, void *, long *, arg_type_info *);
int
umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
	return arch_umovelong (proc, addr, result, info);
}
#else
/* Read a single long from the process's memory address 'addr' */
int
umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
	long pointed_to;

	errno = 0;
	pointed_to = ptrace (PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (pointed_to == -1 && errno)
		return -errno;

	*result = pointed_to;
	if (info) {
		switch (info->type) {
		case ARGTYPE_INT:
			*result &= 0x00000000ffffffffUL;
			break;
		default:
			break;
		}
	}
	return 0;
}
#endif
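/* A note on the errno dance above: PTRACE_PEEKTEXT returns the word
 * that was read, and -1 is a perfectly valid word, so the only way
 * to detect failure is to clear errno before the call and test it
 * afterwards:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKTEXT, pid, addr, 0);
 *	if (word == -1 && errno != 0)
 *		... the read failed ...
 *
 * umovebytes and umovestr at the end of this file rely on the same
 * idiom.  */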
void
trace_me(void) {
	debug(DEBUG_PROCESS, "trace_me: pid=%d", getpid());
	if (ptrace(PTRACE_TRACEME, 0, 1, 0) < 0) {
		perror("PTRACE_TRACEME");
		exit(1);
	}
}

int
trace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "trace_pid: pid=%d", pid);
	if (ptrace(PTRACE_ATTACH, pid, 1, 0) < 0) {
		return -1;
	}

	/* man ptrace: PTRACE_ATTACH attaches to the process specified
	   in pid.  The child is sent a SIGSTOP, but will not
	   necessarily have stopped by the completion of this call;
	   use wait() to wait for the child to stop.  */
	if (waitpid (pid, NULL, __WALL) != pid) {
		perror ("trace_pid: waitpid");
		return -1;
	}

	return 0;
}

void
trace_set_options(Process *proc, pid_t pid) {
	if (proc->tracesysgood & 0x80)
		return;

	debug(DEBUG_PROCESS, "trace_set_options: pid=%d", pid);

	long options = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
		PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE |
		PTRACE_O_TRACEEXEC;
	if (ptrace(PTRACE_SETOPTIONS, pid, 0, options) < 0 &&
	    ptrace(PTRACE_OLDSETOPTIONS, pid, 0, options) < 0) {
		perror("PTRACE_SETOPTIONS");
		return;
	}
	proc->tracesysgood |= 0x80;
}
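/* PTRACE_O_TRACESYSGOOD makes the kernel report syscall stops with
 * bit 7 set in the signal number (SIGTRAP | 0x80), which is why
 * proc->tracesysgood is tracked with the 0x80 flag above.  The
 * PTRACE_OLDSETOPTIONS fallback retries with the older request
 * number (21) for kernels that predate the 0x4200 constant.  */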
void
untrace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "untrace_pid: pid=%d", pid);
	ptrace(PTRACE_DETACH, pid, 1, 0);
}

void
continue_after_signal(pid_t pid, int signum) {
	debug(DEBUG_PROCESS, "continue_after_signal: pid=%d, signum=%d", pid, signum);
	ptrace(PTRACE_SYSCALL, pid, 0, signum);
}

static enum ecb_status
event_for_pid(Event * event, void * data)
{
	if (event->proc != NULL && event->proc->pid == (pid_t)(uintptr_t)data)
		return ecb_yield;
	return ecb_cont;
}

static int
have_events_for(pid_t pid)
{
	return each_qd_event(event_for_pid, (void *)(uintptr_t)pid) != NULL;
}

void
continue_process(pid_t pid)
{
	debug(DEBUG_PROCESS, "continue_process: pid=%d", pid);

	/* Only really continue the process if there are no events in
	   the queue for this process.  Otherwise just wait for the
	   other events to arrive.  */
	if (!have_events_for(pid))
		/* We always trace syscalls to control fork(),
		 * clone(), execve()...  */
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
	else
		debug(DEBUG_PROCESS,
		      "putting off the continue, events in queue.");
}

/**
 * This is used for bookkeeping related to PIDs that the event
 * handlers work with.
 */
struct pid_task {
	pid_t pid;	/* This may be 0 for tasks that exited
			 * mid-handling.  */
	int sigstopped : 1;
	int got_event : 1;
	int delivered : 1;
	int vforked : 1;
	int sysret : 1;
} * pids;

struct pid_set {
	struct pid_task * tasks;
	size_t count;
	size_t alloc;
};

/**
 * Breakpoint re-enablement.  When we hit a breakpoint, we must
 * disable it, single-step, and re-enable it.  That single-step can
 * be done only by one task in a task group, while others are
 * stopped, otherwise the processes would race for who sees the
 * breakpoint disabled and who doesn't.  The following is to keep
 * track of it all.
 */
struct process_stopping_handler
{
	Event_Handler super;

	/* The task that is doing the re-enablement.  */
	Process * task_enabling_breakpoint;

	/* The breakpoint being re-enabled.  */
	Breakpoint * breakpoint_being_enabled;

	enum {
		/* We are waiting for everyone to land in t/T.  */
		psh_stopping = 0,

		/* We are doing the PTRACE_SINGLESTEP.  */
		psh_singlestep,

		/* We are waiting for all the SIGSTOPs to arrive so
		 * that we can sink them.  */
		psh_sinking,

		/* This is for tracking the ugly workaround.  */
		psh_ugly_workaround,
	} state;

	int exiting;

	struct pid_set pids;
};

static struct pid_task *
get_task_info(struct pid_set * pids, pid_t pid)
{
	assert(pid != 0);
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid == pid)
			return &pids->tasks[i];

	return NULL;
}

static struct pid_task *
add_task_info(struct pid_set * pids, pid_t pid)
{
	if (pids->count == pids->alloc) {
		size_t ns = (2 * pids->alloc) ?: 4;
		struct pid_task * n = realloc(pids->tasks,
					      sizeof(*pids->tasks) * ns);
		if (n == NULL)
			return NULL;
		pids->tasks = n;
		pids->alloc = ns;
	}
	struct pid_task * task_info = &pids->tasks[pids->count++];
	memset(task_info, 0, sizeof(*task_info));
	task_info->pid = pid;
	return task_info;
}
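/* add_task_info grows the set geometrically: the GNU `?:' extension
 * in (2 * pids->alloc) ?: 4 starts the array at 4 entries and
 * doubles it thereafter, so a sequence of additions costs amortized
 * O(1) per task.  */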
static enum pcb_status
task_stopped(Process * task, void * data)
{
	enum process_status st = process_status(task->pid);
	if (data != NULL)
		*(enum process_status *)data = st;

	/* If the task is already stopped, don't worry about it.
	 * Likewise if it managed to become a zombie or terminate in
	 * the meantime.  This can happen when the whole thread group
	 * is terminating.  */
	switch (st) {
	case ps_invalid:
	case ps_tracing_stop:
	case ps_zombie:
	case ps_sleeping:
		return pcb_cont;
	case ps_stop:
	case ps_other:
		return pcb_stop;
	}

	abort ();
}

/* Task is blocked if it's stopped, or if it's a vfork parent.  */
static enum pcb_status
task_blocked(Process * task, void * data)
{
	struct pid_set * pids = data;
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info != NULL
	    && task_info->vforked)
		return pcb_cont;

	return task_stopped(task, NULL);
}

static Event * process_vfork_on_event(Event_Handler * super, Event * event);

static enum pcb_status
task_vforked(Process * task, void * data)
{
	if (task->event_handler != NULL
	    && task->event_handler->on_event == &process_vfork_on_event)
		return pcb_stop;
	return pcb_cont;
}

static int
is_vfork_parent(Process * task)
{
	return each_task(task->leader, &task_vforked, NULL) != NULL;
}

static enum pcb_status
send_sigstop(Process * task, void * data)
{
	Process * leader = task->leader;
	struct pid_set * pids = data;

	/* Look for a pre-existing task record, or add a new one.  */
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info == NULL)
		task_info = add_task_info(pids, task->pid);
	if (task_info == NULL) {
		perror("send_sigstop: add_task_info");
		destroy_event_handler(leader);
		/* Signal failure upwards.  */
		return pcb_stop;
	}

	/* This task still has not been attached to.  It should be
	   stopped by the kernel.  */
	if (task->state == STATE_BEING_CREATED)
		return pcb_cont;

	/* Don't bother sending SIGSTOP if we are already stopped, or
	 * if we sent the SIGSTOP already, which happens when we are
	 * handling "onexit" and inherited the handler from breakpoint
	 * re-enablement.  */
	enum process_status st;
	if (task_stopped(task, &st) == pcb_cont)
		return pcb_cont;
	if (task_info->sigstopped) {
		if (!task_info->delivered)
			return pcb_cont;
		task_info->delivered = 0;
	}

	/* Also don't attempt to stop the task if it's the parent of a
	 * vforked process.  We set up the event handler specially to
	 * hint us.  In that case the parent is in D state, which we
	 * use to weed out unnecessary looping.  */
	if (st == ps_sleeping
	    && is_vfork_parent (task)) {
		task_info->vforked = 1;
		return pcb_cont;
	}

	if (task_kill(task->pid, SIGSTOP) >= 0) {
		debug(DEBUG_PROCESS, "send SIGSTOP to %d", task->pid);
		task_info->sigstopped = 1;
	} else
		fprintf(stderr,
			"Warning: couldn't send SIGSTOP to %d\n", task->pid);

	return pcb_cont;
}

/* On certain kernels, detaching right after a singlestep causes the
   tracee to be killed with a SIGTRAP (even though the singlestep was
   properly caught by waitpid).  The ugly workaround is to put a
   breakpoint where IP points and let the process continue.  After
   this the breakpoint can be retracted and the process detached.  */
static void
ugly_workaround(Process * proc)
{
	void * ip = get_instruction_pointer(proc);
	Breakpoint * sbp = dict_find_entry(proc->leader->breakpoints, ip);
	if (sbp != NULL)
		enable_breakpoint(proc, sbp);
	else
		insert_breakpoint(proc, ip, NULL, 1);
	ptrace(PTRACE_CONT, proc->pid, 0, 0);
}

static void
process_stopping_done(struct process_stopping_handler * self, Process * leader)
{
	debug(DEBUG_PROCESS, "process stopping done %d",
	      self->task_enabling_breakpoint->pid);
	size_t i;
	if (!self->exiting) {
		for (i = 0; i < self->pids.count; ++i)
			if (self->pids.tasks[i].pid != 0
			    && (self->pids.tasks[i].delivered
				|| self->pids.tasks[i].sysret))
				continue_process(self->pids.tasks[i].pid);
		continue_process(self->task_enabling_breakpoint->pid);
		destroy_event_handler(leader);
	} else {
		self->state = psh_ugly_workaround;
		ugly_workaround(self->task_enabling_breakpoint);
	}
}

/* Before we detach, we need to make sure that the task's IP is on
 * the edge of an instruction.  So for tasks that have a breakpoint
 * event in the queue, we adjust the instruction pointer, just like
 * continue_after_breakpoint does.  */
static enum ecb_status
undo_breakpoint(Event * event, void * data)
{
	if (event != NULL
	    && event->proc->leader == data
	    && event->type == EVENT_BREAKPOINT)
		set_instruction_pointer(event->proc, event->e_un.brk_addr);
	return ecb_cont;
}

static enum pcb_status
untrace_task(Process * task, void * data)
{
	if (task != data)
		untrace_pid(task->pid);
	return pcb_cont;
}

static enum pcb_status
remove_task(Process * task, void * data)
{
	/* Don't remove the leader just yet.  */
	if (task != data)
		remove_process(task);
	return pcb_cont;
}

static void
detach_process(Process * leader)
{
	each_qd_event(&undo_breakpoint, leader);
	disable_all_breakpoints(leader);

	/* Now untrace the process, if it was attached to by -p.  */
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL)
			continue;
		if (proc->leader == leader) {
			each_task(leader, &untrace_task, NULL);
			break;
		}
	}
	each_task(leader, &remove_task, leader);
	destroy_event_handler(leader);
	remove_task(leader, NULL);
}
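/* The order in detach_process matters: breakpoint events still in
 * the queue are undone first (rolling the IP back onto the
 * instruction boundary), then all breakpoints are removed from the
 * tracee, and only then are the tasks untraced and the bookkeeping
 * torn down.  Reversing this would risk detaching a task whose IP
 * still points into the middle of a breakpoint site.  */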
static void
handle_stopping_event(struct pid_task * task_info, Event ** eventp)
{
	/* Mark all events, so that we know whom to SIGCONT later.  */
	if (task_info != NULL)
		task_info->got_event = 1;

	Event * event = *eventp;

	/* In every state, sink SIGSTOP events for the tasks that we
	 * sent them to.  */
	if (task_info != NULL
	    && event->type == EVENT_SIGNAL
	    && event->e_un.signum == SIGSTOP) {
		debug(DEBUG_PROCESS, "SIGSTOP delivered to %d", task_info->pid);
		if (task_info->sigstopped
		    && !task_info->delivered) {
			task_info->delivered = 1;
			*eventp = NULL; // sink the event
		} else
			fprintf(stderr, "suspicious: %d got SIGSTOP, but "
				"sigstopped=%d and delivered=%d\n",
				task_info->pid, task_info->sigstopped,
				task_info->delivered);
	}
}

/* Some SIGSTOPs may have not been delivered to their respective
 * tasks yet.  They are still in the queue.  If we have seen an event
 * for that process, continue it, so that the SIGSTOP can be
 * delivered and caught by ltrace.  We don't mind that the process is
 * after a breakpoint (and therefore potentially doesn't have an
 * aligned IP), because the signal will be delivered without the
 * process actually starting.  */
static void
continue_for_sigstop_delivery(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i) {
		if (pids->tasks[i].pid != 0
		    && pids->tasks[i].sigstopped
		    && !pids->tasks[i].delivered
		    && pids->tasks[i].got_event) {
			debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
			      pids->tasks[i].pid);
			ptrace(PTRACE_SYSCALL, pids->tasks[i].pid, 0, 0);
		}
	}
}
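/* Life cycle of a stop, as tracked in struct pid_task: send_sigstop
 * sets .sigstopped when the signal is sent; handle_stopping_event
 * sets .got_event on any event and flips .delivered when the SIGSTOP
 * itself arrives (sinking that event so it is never reported as a
 * real signal); continue_for_sigstop_delivery nudges tasks whose
 * SIGSTOP is still queued.  */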
static int
event_exit_p(Event * event)
{
	return event != NULL && (event->type == EVENT_EXIT
				 || event->type == EVENT_EXIT_SIGNAL);
}

static int
event_exit_or_none_p(Event * event)
{
	return event == NULL || event_exit_p(event)
		|| event->type == EVENT_NONE;
}

static int
await_sigstop_delivery(struct pid_set * pids, struct pid_task * task_info,
		       Event * event)
{
	/* If we still didn't get our SIGSTOP, continue the process
	 * and carry on.  */
	if (event != NULL && !event_exit_or_none_p(event)
	    && task_info != NULL && task_info->sigstopped) {
		debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
		      task_info->pid);
		/* We should get the signal the first thing
		 * after this, so it should be OK to continue
		 * even if we are over a breakpoint.  */
		ptrace(PTRACE_SYSCALL, task_info->pid, 0, 0);

	} else {
		/* If all SIGSTOPs were delivered, uninstall the
		 * handler and continue everyone.  */
		/* XXX I suspect that we should check tasks that are
		 * still around.  As things are now, there should be a
		 * race between waiting for everyone to stop and one
		 * of the tasks exiting.  */
		int all_clear = 1;
		size_t i;
		for (i = 0; i < pids->count; ++i)
			if (pids->tasks[i].pid != 0
			    && pids->tasks[i].sigstopped
			    && !pids->tasks[i].delivered) {
				all_clear = 0;
				break;
			}
		return all_clear;
	}

	return 0;
}

static int
all_stops_accountable(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid != 0
		    && !pids->tasks[i].got_event
		    && !have_events_for(pids->tasks[i].pid))
			return 0;
	return 1;
}

static void
singlestep(Process * proc)
{
	debug(1, "PTRACE_SINGLESTEP");
	if (ptrace(PTRACE_SINGLESTEP, proc->pid, 0, 0))
		perror("PTRACE_SINGLESTEP");
}
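/* State machine of the stopping handler, as driven by
 * process_stopping_on_event and process_stopping_done:
 *
 *	psh_stopping --(all tasks blocked)--> psh_singlestep
 *	psh_singlestep --(step finished)----> psh_sinking
 *	psh_sinking --(SIGSTOPs sunk)-------> done, or
 *	                                      psh_ugly_workaround
 *	                                      if ->exiting is set
 */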
/* This event handler is installed when we are in the process of
 * stopping the whole thread group to do the breakpoint re-enablement
 * for one of the threads.  We pump all events to the queue for later
 * processing while we wait for all the threads to stop.  When this
 * happens, we let the re-enablement thread PTRACE_SINGLESTEP,
 * re-enable, and continue everyone.  */
static Event *
process_stopping_on_event(Event_Handler * super, Event * event)
{
	struct process_stopping_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;
	Breakpoint * sbp = self->breakpoint_being_enabled;
	Process * teb = self->task_enabling_breakpoint;

	debug(DEBUG_PROCESS,
	      "pid %d; event type %d; state %d",
	      task->pid, event->type, self->state);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	if (task_info == NULL)
		fprintf(stderr, "new task??? %d\n", task->pid);
	handle_stopping_event(task_info, &event);

	int state = self->state;
	int event_to_queue = !event_exit_or_none_p(event);

	/* Deactivate the entry if the task exits.  */
	if (event_exit_p(event) && task_info != NULL)
		task_info->pid = 0;

	/* Always handle sysrets.  Whether a sysret occurred, and what
	 * syscall it returns from, may need to be determined based on
	 * the process stack, so we need to keep that in sync with
	 * reality.  Note that we don't continue the process after the
	 * sysret is handled.  See continue_after_syscall.  */
	if (event != NULL && event->type == EVENT_SYSRET) {
		debug(1, "%d LT_EV_SYSRET", event->proc->pid);
		event_to_queue = 0;
		task_info->sysret = 1;
	}

	switch (state) {
	case psh_stopping:
		/* If everyone is stopped, singlestep.  */
		if (each_task(leader, &task_blocked, &self->pids) == NULL) {
			debug(DEBUG_PROCESS, "all stopped, now SINGLESTEP %d",
			      teb->pid);
			if (sbp->enabled)
				disable_breakpoint(teb, sbp);
			singlestep(teb);
			self->state = state = psh_singlestep;
		}
		break;

	case psh_singlestep:
		/* In singlestep state, breakpoint signifies that we
		 * have now stepped, and can re-enable the breakpoint.  */
		if (event != NULL && task == teb) {

			/* This is not the singlestep that we are waiting for.  */
			if (event->type == EVENT_SIGNAL) {
				singlestep(task);
				break;
			}

			/* Essentially we don't care what event caused
			 * the thread to stop.  We can do the
			 * re-enablement now.  */
			if (sbp->enabled)
				enable_breakpoint(teb, sbp);

			continue_for_sigstop_delivery(&self->pids);

			self->breakpoint_being_enabled = NULL;
			self->state = state = psh_sinking;

			if (event->type == EVENT_BREAKPOINT)
				event = NULL; // handled
		} else
			break;

		/* fall-through */

	case psh_sinking:
		if (await_sigstop_delivery(&self->pids, task_info, event))
			process_stopping_done(self, leader);
		break;

	case psh_ugly_workaround:
		if (event == NULL)
			break;
		if (event->type == EVENT_BREAKPOINT) {
			undo_breakpoint(event, leader);
			if (task == teb)
				self->task_enabling_breakpoint = NULL;
		}
		if (self->task_enabling_breakpoint == NULL
		    && all_stops_accountable(&self->pids)) {
			undo_breakpoint(event, leader);
			detach_process(leader);
			event = NULL; // handled
		}
	}

	if (event != NULL && event_to_queue) {
		enque_event(event);
		event = NULL; // sink the event
	}

	return event;
}

static void
process_stopping_destroy(Event_Handler * super)
{
	struct process_stopping_handler * self = (void *)super;
	free(self->pids.tasks);
}

void
continue_after_breakpoint(Process *proc, Breakpoint *sbp)
{
	set_instruction_pointer(proc, sbp->addr);
	if (sbp->enabled == 0) {
		continue_process(proc->pid);
	} else {
		debug(DEBUG_PROCESS,
		      "continue_after_breakpoint: pid=%d, addr=%p",
		      proc->pid, sbp->addr);
#if defined __sparc__ || defined __ia64__ || defined __mips__
		/* we don't want to singlestep here */
		continue_process(proc->pid);
#else
		struct process_stopping_handler * handler
			= calloc(1, sizeof(*handler));
		if (handler == NULL) {
			perror("malloc breakpoint disable handler");
		fatal:
			/* Carry on, not bothering to re-enable.  */
			continue_process(proc->pid);
			return;
		}

		handler->super.on_event = process_stopping_on_event;
		handler->super.destroy = process_stopping_destroy;
		handler->task_enabling_breakpoint = proc;
		handler->breakpoint_being_enabled = sbp;
		install_event_handler(proc->leader, &handler->super);

		if (each_task(proc->leader, &send_sigstop,
			      &handler->pids) != NULL)
			goto fatal;

		/* And deliver the first fake event, in case all the
		 * conditions are already fulfilled.  */
		Event ev;
		ev.type = EVENT_NONE;
		ev.proc = proc;
		process_stopping_on_event(&handler->super, &ev);
#endif
	}
}
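/* Putting it together, hitting an enabled breakpoint in a
 * multi-threaded tracee runs roughly this sequence:
 *
 *	continue_after_breakpoint(proc, sbp)
 *	  -> send_sigstop() to every task in the group
 *	  -> psh_stopping: wait until each task is blocked
 *	  -> disable_breakpoint(); singlestep() the one task
 *	  -> psh_singlestep: step done, enable_breakpoint()
 *	  -> psh_sinking: sink the pending SIGSTOPs
 *	  -> process_stopping_done(): continue everyone
 *
 * Note also the `fatal:' label above: it sits inside the if body,
 * and the later goto jumps back into it, which is legal C and lets
 * both failure paths share the "just continue" cleanup.  */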
/**
 * Ltrace exit.  When we are about to exit, we have to go through all
 * the processes, stop them all, remove all the breakpoints, and then
 * detach the processes that we attached to using -p.  If we left the
 * other tasks running, they might hit stray return breakpoints and
 * produce artifacts, so we better stop everyone, even if it's a bit
 * of extra work.
 */
struct ltrace_exiting_handler
{
	Event_Handler super;
	struct pid_set pids;
};

static Event *
ltrace_exiting_on_event(Event_Handler * super, Event * event)
{
	struct ltrace_exiting_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;

	debug(DEBUG_PROCESS, "pid %d; event type %d", task->pid, event->type);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	handle_stopping_event(task_info, &event);

	if (event != NULL && event->type == EVENT_BREAKPOINT)
		undo_breakpoint(event, leader);

	if (await_sigstop_delivery(&self->pids, task_info, event)
	    && all_stops_accountable(&self->pids))
		detach_process(leader);

	/* Sink all non-exit events.  We are about to exit, so we
	 * don't bother with queuing them.  */
	if (event_exit_or_none_p(event))
		return event;

	return NULL;
}

static void
ltrace_exiting_destroy(Event_Handler * super)
{
	struct ltrace_exiting_handler * self = (void *)super;
	free(self->pids.tasks);
}

static int
ltrace_exiting_install_handler(Process * proc)
{
	/* Only install to leader.  */
	if (proc->leader != proc)
		return 0;

	/* Perhaps we are already installed, if the user passed
	 * several -p options that are tasks of one process.  */
	if (proc->event_handler != NULL
	    && proc->event_handler->on_event == &ltrace_exiting_on_event)
		return 0;

	/* If a stopping handler is already present, let it do the
	 * work.  */
	if (proc->event_handler != NULL) {
		assert(proc->event_handler->on_event
		       == &process_stopping_on_event);
		struct process_stopping_handler * other
			= (void *)proc->event_handler;
		other->exiting = 1;
		return 0;
	}

	struct ltrace_exiting_handler * handler
		= calloc(1, sizeof(*handler));
	if (handler == NULL) {
		perror("malloc exiting handler");
	fatal:
		/* XXXXXXXXXXXXXXXXXXX fixme */
		return -1;
	}

	handler->super.on_event = ltrace_exiting_on_event;
	handler->super.destroy = ltrace_exiting_destroy;
	install_event_handler(proc->leader, &handler->super);

	if (each_task(proc->leader, &send_sigstop,
		      &handler->pids) != NULL)
		goto fatal;

	return 0;
}
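/* When a process-stopping handler is already running, the code above
 * only sets other->exiting = 1.  That is enough: once that handler
 * reaches process_stopping_done, the flag routes it into
 * psh_ugly_workaround and eventually detach_process, instead of
 * continuing the tasks as usual.  */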
/*
 * When the traced process vforks, it's suspended until the child
 * process calls _exit or exec*.  In the meantime, the two share the
 * address space.
 *
 * The child process should only ever call _exit or exec*, but we
 * can't count on that (it's not the role of ltrace to police, but to
 * observe).  In any case, we will _at least_ have to deal with
 * removal of the vfork return breakpoint (which we have to smuggle
 * back in, so that the parent can see it, too), and introduction of
 * the exec* return breakpoint.  Since we already have both
 * breakpoint actions to deal with, we might as well support it all.
 *
 * The gist is that we pretend that the child is in a thread group
 * with its parent, and handle it as a multi-threaded case, with the
 * exception that we know that the parent is blocked, and don't
 * attempt to stop it.  When the child execs, we undo the setup.
 */
struct process_vfork_handler
{
	Event_Handler super;
	void * bp_addr;
};

static Event *
process_vfork_on_event(Event_Handler * super, Event * event)
{
	struct process_vfork_handler * self = (void *)super;
	Breakpoint * sbp;
	assert(self != NULL);

	switch (event->type) {
	case EVENT_BREAKPOINT:
		/* Remember the vfork return breakpoint.  */
		if (self->bp_addr == NULL)
			self->bp_addr = event->e_un.brk_addr;
		break;

	case EVENT_EXIT:
	case EVENT_EXIT_SIGNAL:
	case EVENT_EXEC:
		/* Smuggle back in the vfork return breakpoint, so
		 * that our parent can trip over it once again.  */
		if (self->bp_addr != NULL) {
			sbp = dict_find_entry(event->proc->leader->breakpoints,
					      self->bp_addr);
			if (sbp != NULL)
				insert_breakpoint(event->proc->parent,
						  self->bp_addr,
						  sbp->libsym, 1);
		}

		continue_process(event->proc->parent->pid);

		/* Remove the leader that we artificially set up
		 * earlier.  */
		change_process_leader(event->proc, event->proc);
		destroy_event_handler(event->proc);
		break;

	default:
		;
	}

	return event;
}

void
continue_after_vfork(Process * proc)
{
	debug(DEBUG_PROCESS, "continue_after_vfork: pid=%d", proc->pid);
	struct process_vfork_handler * handler = calloc(1, sizeof(*handler));
	if (handler == NULL) {
		perror("malloc vfork handler");
		/* Carry on, not bothering to treat the process as
		 * necessary.  */
		continue_process(proc->parent->pid);
		return;
	}

	/* We must set up a custom event handler, so that we see
	 * exec/exit events for the task itself.  */
	handler->super.on_event = process_vfork_on_event;
	install_event_handler(proc, &handler->super);

	/* Make sure that the child is the sole thread.  */
	assert(proc->leader == proc);
	assert(proc->next == NULL || proc->next->leader != proc);

	/* Make sure that the child's parent is properly set up.  */
	assert(proc->parent != NULL);
	assert(proc->parent->leader != NULL);

	change_process_leader(proc, proc->parent->leader);
}
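/* A vfork thus plays out as follows: continue_after_vfork adopts the
 * child into the parent's "thread group" and installs
 * process_vfork_on_event on it; send_sigstop recognizes the sleeping
 * parent via is_vfork_parent and leaves it alone; when the child
 * exits or execs, the handler re-inserts the vfork return breakpoint
 * for the parent, wakes the parent up, and undoes the artificial
 * leadership.  */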
952 */ 953void 954ltrace_exiting(void) 955{ 956 struct opt_p_t * it; 957 for (it = opt_p; it != NULL; it = it->next) { 958 Process * proc = pid2proc(it->pid); 959 if (proc == NULL || proc->leader == NULL) 960 continue; 961 if (ltrace_exiting_install_handler(proc->leader) < 0) 962 fprintf(stderr, 963 "Couldn't install exiting handler for %d.\n", 964 proc->pid); 965 } 966} 967 968size_t 969umovebytes(Process *proc, void *addr, void *laddr, size_t len) { 970 971 union { 972 long a; 973 char c[sizeof(long)]; 974 } a; 975 int started = 0; 976 size_t offset = 0, bytes_read = 0; 977 978 while (offset < len) { 979 a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0); 980 if (a.a == -1 && errno) { 981 if (started && errno == EIO) 982 return bytes_read; 983 else 984 return -1; 985 } 986 started = 1; 987 988 if (len - offset >= sizeof(long)) { 989 memcpy(laddr + offset, &a.c[0], sizeof(long)); 990 bytes_read += sizeof(long); 991 } 992 else { 993 memcpy(laddr + offset, &a.c[0], len - offset); 994 bytes_read += (len - offset); 995 } 996 offset += sizeof(long); 997 } 998 999 return bytes_read; 1000} 1001 1002/* Read a series of bytes starting at the process's memory address 1003 'addr' and continuing until a NUL ('\0') is seen or 'len' bytes 1004 have been read. 1005*/ 1006int 1007umovestr(Process *proc, void *addr, int len, void *laddr) { 1008 union { 1009 long a; 1010 char c[sizeof(long)]; 1011 } a; 1012 unsigned i; 1013 int offset = 0; 1014 1015 while (offset < len) { 1016 a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0); 1017 for (i = 0; i < sizeof(long); i++) { 1018 if (a.c[i] && offset + (signed)i < len) { 1019 *(char *)(laddr + offset + i) = a.c[i]; 1020 } else { 1021 *(char *)(laddr + offset + i) = '\0'; 1022 return 0; 1023 } 1024 } 1025 offset += sizeof(long); 1026 } 1027 *(char *)(laddr + offset) = '\0'; 1028 return 0; 1029} 1030