/* Copyright (c) 2005-2007, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Markus Gutschke
 */

#include "base/linuxthreads.h"

#ifdef THREADS
#ifdef __cplusplus
extern "C" {
#endif

#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/wait.h>

#include "base/linux_syscall_support.h"
#include "base/thread_lister.h"

#ifndef CLONE_UNTRACED
#define CLONE_UNTRACED 0x00800000
#endif


/* Synchronous signals that should not be blocked while in the lister thread.
 */
static const int sync_signals[]  = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                     SIGXCPU, SIGXFSZ };

/* itoa() is not a standard function, and we cannot safely call printf()
 * after suspending threads. So, we just implement our own copy. A
 * recursive approach is the easiest here. (Note that negating INT_MIN
 * would overflow, but the values printed here are small pids and fds.)
 */
static char *local_itoa(char *buf, int i) {
  if (i < 0) {
    *buf++ = '-';
    return local_itoa(buf, -i);
  } else {
    if (i >= 10)
      buf = local_itoa(buf, i/10);
    *buf++ = (i%10) + '0';
    *buf   = '\000';
    return buf;
  }
}
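
/* Example (illustrative only): local_itoa() writes the decimal digits and
 * returns a pointer to the terminating NUL, so calls can be chained:
 *
 *   char buf[16];
 *   local_itoa(buf, -42);              // buf now holds "-42"
 *   local_itoa(local_itoa(buf, 4), 2); // buf now holds "42"
 */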


/* Wrapper around clone() that runs "fn" on the same stack as the
 * caller! Unlike fork(), the cloned thread shares the same address space.
 * The caller must be careful to use only minimal amounts of stack until
 * the cloned thread has returned.
 * There is a good chance that the cloned thread and the caller will share
 * the same copy of errno!
 */
#ifdef __GNUC__
#if (__GNUC__ == 3 && __GNUC_MINOR__ >= 1) || __GNUC__ > 3
/* Try to force this function into a separate stack frame, and make sure
 * that arguments are passed on the stack.
 */
static int local_clone (int (*fn)(void *), void *arg, ...)
  __attribute__ ((noinline));
#endif
#endif

static int local_clone (int (*fn)(void *), void *arg, ...) {
  /* Leave 4kB of gap between the caller's stack and the new clone. This
   * should be more than sufficient for the caller to call waitpid() until
   * the cloned thread terminates.
   *
   * It is important that we set the CLONE_UNTRACED flag, because newer
   * versions of "gdb" otherwise attempt to attach to our thread, and will
   * attempt to reap its status codes. This subsequently results in the
   * caller hanging indefinitely in waitpid(), waiting for a change in
   * status that will never happen. By setting the CLONE_UNTRACED flag, we
   * prevent "gdb" from stealing events, but we still expect the thread
   * lister to fail, because it cannot PTRACE_ATTACH to the process that
   * is being debugged. This is OK and the error code will be reported
   * correctly.
   */
  return sys_clone(fn, (char *)&arg - 4096,
                   CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_UNTRACED, arg, 0, 0, 0);
}
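
/* Illustrative sketch (not part of the original API): local_clone() runs
 * "fn" concurrently in the same address space, so the caller typically
 * reaps it with waitpid(), just as ListAllProcessThreads() does below.
 * Hypothetical usage:
 *
 *   static int worker(void *arg) { ... }
 *   ...
 *   pid_t pid = local_clone(worker, arg);
 *   if (pid >= 0)
 *     while (sys_waitpid(pid, NULL, __WALL) < 0 && errno == EINTR)
 *       ;
 */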


/* Local substitute for the atoi() function, which is not necessarily safe
 * to call once threads are suspended (depending on whether libc looks up
 * locale information when executing atoi()).
 */
static int local_atoi(const char *s) {
  int n   = 0;
  int neg = *s == '-';
  if (neg)
    s++;
  while (*s >= '0' && *s <= '9')
    n = 10*n + (*s++ - '0');
  return neg ? -n : n;
}
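
/* For illustration: local_atoi() accepts an optional leading '-' and stops
 * at the first non-digit, e.g. local_atoi("-123") == -123 and
 * local_atoi("42abc") == 42. There is no overflow or whitespace handling.
 */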


/* Re-runs "fn" until it stops failing with EINTR.
 */
#define NO_INTR(fn)   do {} while ((fn) < 0 && errno == EINTR)
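
/* Typical use wraps a system call whose result we still want, e.g.:
 *
 *   ssize_t rc;
 *   NO_INTR(rc = sys_read(fd, buf, len));
 *
 * The macro retries on EINTR only; any other error is left for the caller
 * to inspect via the return value and errno.
 */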


/* Wrap a class around system calls, in order to give us access to
 * a private copy of errno. This only works in C++, but it has the
 * advantage of not needing nested functions, which are a non-standard
 * language extension.
 */
#ifdef __cplusplus
namespace {
  class SysCalls {
   public:
    #define SYS_CPLUSPLUS
    #define SYS_ERRNO     my_errno
    #define SYS_INLINE    inline
    #define SYS_PREFIX    -1
    #undef  SYS_LINUX_SYSCALL_SUPPORT_H
    #include "linux_syscall_support.h"
    SysCalls() : my_errno(0) { }
    int my_errno;
  };
}
#define ERRNO sys.my_errno
#else
#define ERRNO my_errno
#endif
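
/* A minimal sketch of how this wrapper is meant to be used (assuming the
 * macros in linux_syscall_support.h turn the system calls into member
 * functions of the class, as the sys0_* aliases further below suggest):
 *
 *   SysCalls sys;
 *   if (sys.close(fd) < 0 && sys.my_errno == EINTR) { ... }
 *
 * The global errno is never touched, which matters once we share it with
 * a cloned thread.
 */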


/* Wrapper for open() which is guaranteed to never fail with EINTR.
 */
static int c_open(const char *fname, int flags, int mode) {
  int rc;
  NO_INTR(rc = sys_open(fname, flags, mode));
  return rc;
}


/* abort() is not safely reentrant, and changes its behavior each time
 * it is called. This means that if the main application ever called
 * abort(), we cannot safely call it again. This would happen if we were
 * called from a SIGABRT signal handler in the main application. So, we
 * document that raising SIGABRT from the thread lister makes it not
 * signal safe (and vice versa).
 * Also, since we share the address space with the main application, we
 * cannot call abort() from the callback and expect the main application
 * to behave correctly afterwards. In fact, the only thing we can do is
 * terminate the main application with extreme prejudice (aka
 * PTRACE_KILL).
 * We set up our own SIGABRT handler to do this.
 * In order to find the main application from the signal handler, we
 * need to store information about it in global variables. This is
 * safe, because the main application should be suspended at this
 * time. If the callback ever called ResumeAllProcessThreads(), then
 * we are running a higher risk, though. So, try to avoid calling
 * abort() after calling ResumeAllProcessThreads.
 */
static volatile int *sig_pids, sig_num_threads, sig_proc, sig_marker;


/* Signal handler to help us recover from dying while we are attached to
 * other threads.
 */
static void SignalHandler(int signum, siginfo_t *si, void *data) {
  if (sig_pids != NULL) {
    if (signum == SIGABRT) {
      while (sig_num_threads-- > 0) {
        /* Not sure if sched_yield is really necessary here, but it does not */
        /* hurt, and it might be necessary for the same reasons that we have */
        /* to do so in sys_ptrace_detach().                                  */
        sys_sched_yield();
        sys_ptrace(PTRACE_KILL, sig_pids[sig_num_threads], 0, 0);
      }
    } else if (sig_num_threads > 0) {
      ResumeAllProcessThreads(sig_num_threads, (pid_t *)sig_pids);
    }
  }
  sig_pids = NULL;
  if (sig_marker >= 0)
    NO_INTR(sys_close(sig_marker));
  sig_marker = -1;
  if (sig_proc >= 0)
    NO_INTR(sys_close(sig_proc));
  sig_proc = -1;

  sys__exit(signum == SIGABRT ? 1 : 2);
}


/* Try to dirty the stack, and hope that the compiler is not smart enough
 * to optimize this function away. Or worse, the compiler could inline the
 * function and permanently allocate the data on the stack.
 */
static void DirtyStack(size_t amount) {
  char buf[amount];
  memset(buf, 0, amount);
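  /* Passing "buf" to a system call makes the buffer escape, so the
   * compiler cannot optimize away the memset() above; the read itself is
   * expected to fail, as -1 is never a valid file descriptor.
   */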
  sys_read(-1, buf, amount);
}


/* Data structure for passing arguments to the lister thread.
 */
#define ALT_STACKSIZE (MINSIGSTKSZ + 4096)

struct ListerParams {
  int         result, err;
  char        *altstack_mem;
  ListAllProcessThreadsCallBack callback;
  void        *parameter;
  va_list     ap;
};


static void ListerThread(struct ListerParams *args) {
  int                found_parent = 0;
  pid_t              clone_pid  = sys_gettid(), ppid = sys_getppid();
  char               proc_self_task[80], marker_name[48], *marker_path;
  const char         *proc_paths[3];
  const char *const  *proc_path = proc_paths;
  int                proc = -1, marker = -1, num_threads = 0;
  int                max_threads = 0, sig;
  struct kernel_stat marker_sb, proc_sb;
  stack_t            altstack;

  /* Create "marker" that we can use to detect threads sharing the same
   * address space and the same file handles. By setting the FD_CLOEXEC flag
   * we minimize the risk of misidentifying child processes as threads;
   * and since there is still a race condition, we will filter those out
   * later, anyway.
   */
  if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 ||
      sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) {
  failure:
    args->result = -1;
    args->err    = errno;
    if (marker >= 0)
      NO_INTR(sys_close(marker));
    sig_marker = marker = -1;
    if (proc >= 0)
      NO_INTR(sys_close(proc));
    sig_proc = proc = -1;
    sys__exit(1);
  }

  /* Compute search paths for finding thread directories in /proc            */
  local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid);
  strcpy(marker_name, proc_self_task);
  marker_path = marker_name + strlen(marker_name);
  strcat(proc_self_task, "/task/");
  proc_paths[0] = proc_self_task; /* /proc/$$/task/                          */
  proc_paths[1] = "/proc/";       /* /proc/                                  */
  proc_paths[2] = NULL;

  /* Compute path for marker socket in /proc                                 */
  local_itoa(strcpy(marker_path, "/fd/") + 4, marker);
  if (sys_stat(marker_name, &marker_sb) < 0) {
    goto failure;
  }

  /* Catch signals on an alternate pre-allocated stack. This way, we can
   * safely execute the signal handler even if we ran out of memory.
   */
  memset(&altstack, 0, sizeof(altstack));
  altstack.ss_sp    = args->altstack_mem;
  altstack.ss_flags = 0;
  altstack.ss_size  = ALT_STACKSIZE;
  sys_sigaltstack(&altstack, (const stack_t *)NULL);

  /* Some kernels forget to wake up traced processes, when the
   * tracer dies.  So, intercept synchronous signals and make sure
   * that we wake up our tracees before dying. It is the caller's
   * responsibility to ensure that asynchronous signals do not
   * interfere with this function.
   */
  sig_marker = marker;
  sig_proc   = -1;
  for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
    struct kernel_sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction_ = SignalHandler;
    sys_sigfillset(&sa.sa_mask);
    sa.sa_flags      = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
    sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL);
  }

  /* Read process directories in /proc/...                                   */
  for (;;) {
    /* Some kernels know about threads, and hide them in "/proc"
     * (although they are still there, if you know the process
     * id). Threads are moved into a separate "task" directory. We
     * check there first, and then fall back on the older naming
     * convention if necessary.
     */
    if ((sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0)) < 0) {
      if (*++proc_path != NULL)
        continue;
      goto failure;
    }
    if (sys_fstat(proc, &proc_sb) < 0)
      goto failure;

    /* Since we are suspending threads, we cannot call any libc
     * functions that might acquire locks. Most notably, we cannot
     * call malloc(). So, we have to allocate memory on the stack,
     * instead. Since we do not know how much memory we need, we
     * make a best guess. And if we guessed incorrectly we retry on
     * a second iteration (by jumping to "detach_threads").
     *
     * Unless the number of threads is increasing very rapidly, we
     * should never need to do so, though, as our guesstimate is very
     * conservative.
     */
    if (max_threads < proc_sb.st_nlink + 100)
      max_threads = proc_sb.st_nlink + 100;

    /* scope */ {
      pid_t pids[max_threads];
      int   added_entries = 0;
      sig_num_threads     = num_threads;
      sig_pids            = pids;
      for (;;) {
        struct kernel_dirent *entry;
        char buf[4096];
        ssize_t nbytes = sys_getdents(proc, (struct kernel_dirent *)buf,
                                      sizeof(buf));
        if (nbytes < 0)
          goto failure;
        else if (nbytes == 0) {
          if (added_entries) {
            /* Need to keep iterating over "/proc" in multiple
             * passes until we no longer find any more threads. This
             * algorithm eventually completes, when all threads have
             * been suspended.
             */
            added_entries = 0;
            sys_lseek(proc, 0, SEEK_SET);
            continue;
          }
          break;
        }
        for (entry = (struct kernel_dirent *)buf;
             entry < (struct kernel_dirent *)&buf[nbytes];
             entry = (struct kernel_dirent *)((char *)entry+entry->d_reclen)) {
          if (entry->d_ino != 0) {
            const char *ptr = entry->d_name;
            pid_t pid;

            /* Some kernels hide threads by preceding the pid with a '.'     */
            if (*ptr == '.')
              ptr++;

            /* If the directory is not numeric, it cannot be a
             * process/thread
             */
            if (*ptr < '0' || *ptr > '9')
              continue;
            pid = local_atoi(ptr);

            /* Attach (and suspend) all threads                              */
            if (pid && pid != clone_pid) {
              struct kernel_stat tmp_sb;
              char fname[entry->d_reclen + 48];
              strcat(strcat(strcpy(fname, "/proc/"),
                            entry->d_name), marker_path);

              /* Check if the marker is identical to the one we created      */
              if (sys_stat(fname, &tmp_sb) >= 0 &&
                  marker_sb.st_ino == tmp_sb.st_ino) {
                long i, j;

                /* Found one of our threads, make sure it is not a duplicate */
                for (i = 0; i < num_threads; i++) {
                  /* Linear search is slow, but should not matter much for
                   * the typically small number of threads.
                   */
                  if (pids[i] == pid) {
                    /* Found a duplicate; most likely on second pass         */
                    goto next_entry;
                  }
                }

                /* Check whether data structure needs growing                */
                if (num_threads >= max_threads) {
                  /* Back to square one, this time with more memory          */
                  NO_INTR(sys_close(proc));
                  goto detach_threads;
                }

                /* Attaching to thread suspends it                           */
                pids[num_threads++] = pid;
                sig_num_threads     = num_threads;
                if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0,
                               (void *)0) < 0) {
                  /* If the operation failed, ignore the thread. Maybe it
                   * just died?  There might also be a race
                   * condition with a concurrent core dumper or
                   * with a debugger. In that case, we will just
                   * make a best effort, rather than failing
                   * entirely.
                   */
                  num_threads--;
                  sig_num_threads = num_threads;
                  goto next_entry;
                }
                while (sys_waitpid(pid, (int *)0, __WALL) < 0) {
                  if (errno != EINTR) {
                    sys_ptrace_detach(pid);
                    num_threads--;
                    sig_num_threads = num_threads;
                    goto next_entry;
                  }
                }

                /* PTRACE_PEEKDATA reads the word at address &i in the
                 * tracee and stores it in j. If the tracee shares our
                 * address space, it sees our increment of i; a forked
                 * child with a copied address space does not.
                 */
                if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
                    sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i   != j) {
                  /* Address spaces are distinct, even though both
                   * processes show the "marker". This is probably
                   * a forked child process rather than a thread.
                   */
                  sys_ptrace_detach(pid);
                  num_threads--;
                  sig_num_threads = num_threads;
                } else {
                  found_parent |= pid == ppid;
                  added_entries++;
                }
              }
            }
          }
        next_entry:;
        }
      }
      NO_INTR(sys_close(proc));
      sig_proc = proc = -1;

      /* If we failed to find any threads, try looking somewhere else in
       * /proc. Maybe threads are reported differently on this system.
       */
      if (num_threads > 1 || !*++proc_path) {
        NO_INTR(sys_close(marker));
        sig_marker = marker = -1;

        /* If we never found the parent process, something is very wrong.
         * Most likely, we are running in a debugger. Any attempt to operate
         * on the threads would be very incomplete. Let's just report an
         * error to the caller.
         */
        if (!found_parent) {
          ResumeAllProcessThreads(num_threads, pids);
          sys__exit(3);
        }

        /* Now we are ready to call the callback,
         * which takes care of resuming the threads for us.
         */
        args->result = args->callback(args->parameter, num_threads,
                                      pids, args->ap);
        args->err = errno;

        /* Callback should have resumed threads, but better safe than sorry  */
        if (ResumeAllProcessThreads(num_threads, pids)) {
          /* Callback forgot to resume at least one thread, report error     */
          args->err    = EINVAL;
          args->result = -1;
        }

        sys__exit(0);
      }
    detach_threads:
      /* Resume all threads prior to retrying the operation                  */
      ResumeAllProcessThreads(num_threads, pids);
      sig_pids = NULL;
      num_threads = 0;
      sig_num_threads = num_threads;
      max_threads += 100;
    }
  }
}


/* This function gets the list of all Linux threads of the current process
 * and passes them to the 'callback' along with the 'parameter' pointer; at
 * the time the callback is invoked, all of these threads are paused via
 * PTRACE_ATTACH.
 * The callback is executed from a separate thread which shares only the
 * address space, the filesystem, and the filehandles with the caller. Most
 * notably, it does not share the same pid and ppid; and if it terminates,
 * the rest of the application is still there. 'callback' is supposed to
 * call (or arrange for a call to) ResumeAllProcessThreads. This happens
 * automatically if the thread raises a synchronous signal (e.g. SIGSEGV);
 * asynchronous signals are blocked. If the 'callback' decides to unblock
 * them, it must ensure that they cannot terminate the application, or
 * that ResumeAllProcessThreads will get called.
 * It is an error for the 'callback' to make any library calls that could
 * acquire locks. Most notably, this means that most system calls have to
 * avoid going through libc. Also, this means that it is not legal to call
 * exit() or abort().
 * We return -1 on error and the return value of 'callback' on success.
 */
int ListAllProcessThreads(void *parameter,
                          ListAllProcessThreadsCallBack callback, ...) {
  char                   altstack_mem[ALT_STACKSIZE];
  struct ListerParams    args;
  pid_t                  clone_pid;
  int                    dumpable = 1, sig;
  struct kernel_sigset_t sig_blocked, sig_old;

  va_start(args.ap, callback);

  /* If we are short on virtual memory, initializing the alternate stack
   * might trigger a SIGSEGV. Let's do this early, before it could get us
   * into more trouble (i.e. before signal handlers try to use the alternate
   * stack, and before we attach to other threads).
   */
  memset(altstack_mem, 0, sizeof(altstack_mem));

  /* Some of our cleanup functions could conceivably use more stack space.
   * Try to touch the stack right now. This could be defeated by the compiler
   * being too smart for its own good, so try really hard.
   */
  DirtyStack(32768);

  /* Make this process "dumpable". This is necessary in order to ptrace()
   * after having called setuid().
   */
  dumpable = sys_prctl(PR_GET_DUMPABLE, 0);
  if (!dumpable)
    sys_prctl(PR_SET_DUMPABLE, 1);

  /* Fill in argument block for dumper thread                                */
  args.result       = -1;
  args.err          = 0;
  args.altstack_mem = altstack_mem;
  args.parameter    = parameter;
  args.callback     = callback;

  /* Before cloning the thread lister, block all asynchronous signals, as we */
  /* are not prepared to handle them.                                        */
  sys_sigfillset(&sig_blocked);
  for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
    sys_sigdelset(&sig_blocked, sync_signals[sig]);
  }
  if (sys_sigprocmask(SIG_BLOCK, &sig_blocked, &sig_old)) {
    args.err = errno;
    args.result = -1;
    goto failed;
  }

  /* scope */ {
    /* After cloning, both the parent and the child share the same instance
     * of errno. We must make sure that at least one of these processes
     * (in our case, the parent) uses modified syscall macros that update
     * a local copy of errno, instead.
     */
    #ifdef __cplusplus
      #define sys0_sigprocmask sys.sigprocmask
      #define sys0_waitpid     sys.waitpid
      SysCalls sys;
    #else
      int my_errno;
      #define SYS_ERRNO        my_errno
      #define SYS_INLINE       inline
      #define SYS_PREFIX       0
      #undef  SYS_LINUX_SYSCALL_SUPPORT_H
      #include "linux_syscall_support.h"
    #endif

    int clone_errno;
    clone_pid = local_clone((int (*)(void *))ListerThread, &args);
    clone_errno = errno;

    sys_sigprocmask(SIG_SETMASK, &sig_old, &sig_old);

    if (clone_pid >= 0) {
      int status, rc;
      while ((rc = sys0_waitpid(clone_pid, &status, __WALL)) < 0 &&
             ERRNO == EINTR) {
             /* Keep waiting                                                 */
      }
      if (rc < 0) {
        args.err = ERRNO;
        args.result = -1;
      } else if (WIFEXITED(status)) {
        switch (WEXITSTATUS(status)) {
          case 0: break;             /* Normal process termination           */
          case 2: args.err = EFAULT; /* Some fault (e.g. SIGSEGV) detected   */
                  args.result = -1;
                  break;
          case 3: args.err = EPERM;  /* Process is already being traced      */
                  args.result = -1;
                  break;
          default:args.err = ECHILD; /* Child died unexpectedly              */
                  args.result = -1;
                  break;
        }
      } else {
        /* Terminated due to an unhandled signal                             */
        args.err    = EFAULT;
        args.result = -1;
      }
    } else {
      args.result = -1;
      args.err    = clone_errno;
    }
  }

  /* Restore the "dumpable" state of the process                             */
failed:
  if (!dumpable)
    sys_prctl(PR_SET_DUMPABLE, dumpable);

  va_end(args.ap);

  errno = args.err;
  return args.result;
}
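
/* Hypothetical usage sketch (all names other than the two exported
 * functions are made up for illustration):
 *
 *   static int CountCallback(void *param, int num_threads, pid_t *pids,
 *                            va_list ap) {
 *     *(int *)param = num_threads;  // must not take locks or call libc
 *     ResumeAllProcessThreads(num_threads, pids);
 *     return num_threads;
 *   }
 *   ...
 *   int n = 0;
 *   if (ListAllProcessThreads(&n, CountCallback) < 0)
 *     perror("ListAllProcessThreads");
 */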

/* This function resumes all of the Linux threads that
 * ListAllProcessThreads() paused before handing them to its callback.
 * The function returns non-zero if at least one thread was
 * suspended and has now been resumed.
 */
int ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
  int detached_at_least_one = 0;
  while (num_threads-- > 0) {
    detached_at_least_one |= sys_ptrace_detach(thread_pids[num_threads]) >= 0;
  }
  return detached_at_least_one;
}

#ifdef __cplusplus
}
#endif
#endif