libminijail.c revision 3b2e6e495cf91ae3645000e71653369383997ef5
1/* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
2 * Use of this source code is governed by a BSD-style license that can be
3 * found in the LICENSE file.
4 */
5
6#define _BSD_SOURCE
7#define _DEFAULT_SOURCE
8#define _GNU_SOURCE
9
10#include <asm/unistd.h>
11#include <ctype.h>
12#include <errno.h>
13#include <fcntl.h>
14#include <grp.h>
15#include <inttypes.h>
16#include <limits.h>
17#include <linux/capability.h>
18#include <pwd.h>
19#include <sched.h>
20#include <signal.h>
21#include <stdarg.h>
22#include <stdbool.h>
23#include <stddef.h>
24#include <stdio.h>
25#include <stdlib.h>
26#include <string.h>
27#include <syscall.h>
28#include <sys/capability.h>
29#include <sys/mount.h>
30#include <sys/param.h>
31#include <sys/prctl.h>
32#include <sys/stat.h>
33#include <sys/types.h>
34#include <sys/user.h>
35#include <sys/utsname.h>
36#include <sys/wait.h>
37#include <unistd.h>
38
39#include "libminijail.h"
40#include "libminijail-private.h"
41
42#include "signal_handler.h"
43#include "syscall_filter.h"
44#include "util.h"
45
46#ifdef HAVE_SECUREBITS_H
47# include <linux/securebits.h>
48#else
49# define SECURE_ALL_BITS	0x55
50# define SECURE_ALL_LOCKS	(SECURE_ALL_BITS << 1)
51#endif
52/* For kernels < 4.3. */
53#define OLD_SECURE_ALL_BITS	0x15
54#define OLD_SECURE_ALL_LOCKS	(OLD_SECURE_ALL_BITS << 1)
55
56/*
57 * Assert the value of SECURE_ALL_BITS at compile time.
58 * Brillo devices are currently compiled against 4.4 kernel headers; kernel 4.3
59 * added a new securebit. When a new securebit is added, using the newer
60 * SECURE_ALL_BITS mask on older kernels makes prctl(PR_SET_SECUREBITS) fail
61 * with EPERM, so this assert flags any change to the mask and keeps the
62 * OLD_SECURE_ALL_BITS fallback in sync.
63 */
64#ifdef __BRILLO__
65_Static_assert(SECURE_ALL_BITS == 0x55, "SECURE_ALL_BITS == 0x55.");
66#endif
67
68/* Until these are reliably available in linux/prctl.h. */
69#ifndef PR_SET_SECCOMP
70# define PR_SET_SECCOMP 22
71#endif
72
73#ifndef PR_ALT_SYSCALL
74# define PR_ALT_SYSCALL 0x43724f53
75#endif
76
77/* For seccomp_filter using BPF. */
78#ifndef PR_SET_NO_NEW_PRIVS
79# define PR_SET_NO_NEW_PRIVS 38
80#endif
81#ifndef SECCOMP_MODE_FILTER
82# define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */
83#endif
84
85#ifdef USE_SECCOMP_SOFTFAIL
86# define SECCOMP_SOFTFAIL 1
87#else
88# define SECCOMP_SOFTFAIL 0
89#endif
90
91/* New cgroup namespace might not be in linux-headers yet. */
92#ifndef CLONE_NEWCGROUP
93# define CLONE_NEWCGROUP 0x02000000
94#endif
95
96#define MAX_CGROUPS 10 /* 10 different controllers supported by Linux. */
97
98struct mountpoint {
99	char *src;
100	char *dest;
101	char *type;
102	char *data;
103	int has_data;
104	unsigned long flags;
105	struct mountpoint *next;
106};
107
108struct minijail {
109	/*
110	 * WARNING: if you add a flag here you need to make sure it's
111	 * accounted for in minijail_pre{enter|exec}() below.
112	 */
113	struct {
114		int uid:1;
115		int gid:1;
116		int usergroups:1;
117		int suppl_gids:1;
118		int use_caps:1;
119		int capbset_drop:1;
120		int vfs:1;
121		int enter_vfs:1;
122		int skip_remount_private:1;
123		int pids:1;
124		int ipc:1;
125		int net:1;
126		int enter_net:1;
127		int ns_cgroups:1;
128		int userns:1;
129		int seccomp:1;
130		int remount_proc_ro:1;
131		int no_new_privs:1;
132		int seccomp_filter:1;
133		int log_seccomp_filter:1;
134		int chroot:1;
135		int pivot_root:1;
136		int mount_tmp:1;
137		int do_init:1;
138		int pid_file:1;
139		int cgroups:1;
140		int alt_syscall:1;
141		int reset_signal_mask:1;
142	} flags;
143	uid_t uid;
144	gid_t gid;
145	gid_t usergid;
146	char *user;
147	size_t suppl_gid_count;
148	gid_t *suppl_gid_list;
149	uint64_t caps;
150	uint64_t cap_bset;
151	pid_t initpid;
152	int mountns_fd;
153	int netns_fd;
154	char *chrootdir;
155	char *pid_file_path;
156	char *uidmap;
157	char *gidmap;
158	size_t filter_len;
159	struct sock_fprog *filter_prog;
160	char *alt_syscall_table;
161	struct mountpoint *mounts_head;
162	struct mountpoint *mounts_tail;
163	size_t mounts_count;
164	char *cgroups[MAX_CGROUPS];
165	size_t cgroup_count;
166};
167
168/*
169 * Strip out flags meant for the parent.
170 * We keep things that are not inherited across execve(2) (e.g. capabilities),
171 * or are easier to set after execve(2) (e.g. seccomp filters).
172 */
173void minijail_preenter(struct minijail *j)
174{
175	j->flags.vfs = 0;
176	j->flags.enter_vfs = 0;
177	j->flags.skip_remount_private = 0;
178	j->flags.remount_proc_ro = 0;
179	j->flags.pids = 0;
180	j->flags.do_init = 0;
181	j->flags.pid_file = 0;
182	j->flags.cgroups = 0;
183}
184
185/*
186 * Strip out flags meant for the child.
187 * We keep things that are inherited across execve(2).
188 */
189void minijail_preexec(struct minijail *j)
190{
191	int vfs = j->flags.vfs;
192	int enter_vfs = j->flags.enter_vfs;
193	int skip_remount_private = j->flags.skip_remount_private;
194	int remount_proc_ro = j->flags.remount_proc_ro;
195	int userns = j->flags.userns;
196	if (j->user)
197		free(j->user);
198	j->user = NULL;
199	if (j->suppl_gid_list)
200		free(j->suppl_gid_list);
201	j->suppl_gid_list = NULL;
202	memset(&j->flags, 0, sizeof(j->flags));
203	/* Now restore anything we meant to keep. */
204	j->flags.vfs = vfs;
205	j->flags.enter_vfs = enter_vfs;
206	j->flags.skip_remount_private = skip_remount_private;
207	j->flags.remount_proc_ro = remount_proc_ro;
208	j->flags.userns = userns;
209	/* Note, |pids| will already have been used before this call. */
210}
211
212/* Returns true if the kernel version is less than 3.8. */
213int seccomp_kernel_support_not_required()
214{
215	int major, minor;
216	struct utsname uts;
217	return (uname(&uts) != -1 &&
218			sscanf(uts.release, "%d.%d", &major, &minor) == 2 &&
219			((major < 3) || ((major == 3) && (minor < 8))));
220}
221
222/* Allow seccomp soft-fail (SECCOMP_SOFTFAIL builds); on Android, only for kernels < 3.8. */
223int can_softfail()
224{
225#if SECCOMP_SOFTFAIL
226	if (is_android()) {
227		if (seccomp_kernel_support_not_required())
228			return 1;
229		else
230			return 0;
231	} else {
232		return 1;
233	}
234#endif
235	return 0;
236}
237
238/* Minijail API. */
239
240struct minijail API *minijail_new(void)
241{
242	return calloc(1, sizeof(struct minijail));
243}
244
245void API minijail_change_uid(struct minijail *j, uid_t uid)
246{
247	if (uid == 0)
248		die("useless change to uid 0");
249	j->uid = uid;
250	j->flags.uid = 1;
251}
252
253void API minijail_change_gid(struct minijail *j, gid_t gid)
254{
255	if (gid == 0)
256		die("useless change to gid 0");
257	j->gid = gid;
258	j->flags.gid = 1;
259}
260
261void API minijail_set_supplementary_gids(struct minijail *j, size_t size,
262					 const gid_t *list)
263{
264	size_t i;
265
266	if (j->flags.usergroups)
267		die("cannot inherit *and* set supplementary groups");
268
269	if (size == 0) {
270		/* Clear supplementary groups. */
271		j->suppl_gid_list = NULL;
272		j->suppl_gid_count = 0;
273		j->flags.suppl_gids = 1;
274		return;
275	}
276
277	/* Copy the gid_t array. */
278	j->suppl_gid_list = calloc(size, sizeof(gid_t));
279	if (!j->suppl_gid_list) {
280		die("failed to allocate internal supplementary group array");
281	}
282	for (i = 0; i < size; i++) {
283		j->suppl_gid_list[i] = list[i];
284	}
285	j->suppl_gid_count = size;
286	j->flags.suppl_gids = 1;
287}
288
289int API minijail_change_user(struct minijail *j, const char *user)
290{
291	char *buf = NULL;
292	struct passwd pw;
293	struct passwd *ppw = NULL;
294	ssize_t sz = sysconf(_SC_GETPW_R_SIZE_MAX);
295	if (sz == -1)
296		sz = 65536;	/* your guess is as good as mine... */
297
298	/*
299	 * sysconf(_SC_GETPW_R_SIZE_MAX), under glibc, is documented to return
300	 * the maximum needed size of the buffer, so we don't have to search.
301	 */
302	buf = malloc(sz);
303	if (!buf)
304		return -ENOMEM;
305	getpwnam_r(user, &pw, buf, sz, &ppw);
306	/*
307	 * We're safe to free the buffer here. The strings inside |pw| point
308	 * inside |buf|, but we don't use any of them; this leaves the pointers
309	 * dangling but it's safe. |ppw| points at |pw| if getpwnam_r(3)
310	 * succeeded.
311	 */
312	free(buf);
313	/* getpwnam_r(3) does *not* set errno when |ppw| is NULL. */
314	if (!ppw)
315		return -1;
316	minijail_change_uid(j, ppw->pw_uid);
317	j->user = strdup(user);
318	if (!j->user)
319		return -ENOMEM;
320	j->usergid = ppw->pw_gid;
321	return 0;
322}
323
324int API minijail_change_group(struct minijail *j, const char *group)
325{
326	char *buf = NULL;
327	struct group gr;
328	struct group *pgr = NULL;
329	ssize_t sz = sysconf(_SC_GETGR_R_SIZE_MAX);
330	if (sz == -1)
331		sz = 65536;	/* and mine is as good as yours, really */
332
333	/*
334	 * sysconf(_SC_GETGR_R_SIZE_MAX), under glibc, is documented to return
335	 * the maximum needed size of the buffer, so we don't have to search.
336	 */
337	buf = malloc(sz);
338	if (!buf)
339		return -ENOMEM;
340	getgrnam_r(group, &gr, buf, sz, &pgr);
341	/*
342	 * We're safe to free the buffer here. The strings inside gr point
343	 * inside buf, but we don't use any of them; this leaves the pointers
344	 * dangling but it's safe. pgr points at gr if getgrnam_r succeeded.
345	 */
346	free(buf);
347	/* getgrnam_r(3) does *not* set errno when |pgr| is NULL. */
348	if (!pgr)
349		return -1;
350	minijail_change_gid(j, pgr->gr_gid);
351	return 0;
352}
353
354void API minijail_use_seccomp(struct minijail *j)
355{
356	j->flags.seccomp = 1;
357}
358
359void API minijail_no_new_privs(struct minijail *j)
360{
361	j->flags.no_new_privs = 1;
362}
363
364void API minijail_use_seccomp_filter(struct minijail *j)
365{
366	j->flags.seccomp_filter = 1;
367}
368
369void API minijail_log_seccomp_filter_failures(struct minijail *j)
370{
371	j->flags.log_seccomp_filter = 1;
372}
373
374void API minijail_use_caps(struct minijail *j, uint64_t capmask)
375{
376	/*
377	 * 'minijail_use_caps' configures a runtime-capabilities-only
378	 * environment, including a bounding set matching the thread's runtime
379	 * (permitted|inheritable|effective) sets.
380	 * Therefore, it will override any existing bounding set configurations
381	 * since the latter would allow gaining extra runtime capabilities from
382	 * file capabilities.
383	 */
384	if (j->flags.capbset_drop) {
385		warn("overriding bounding set configuration");
386		j->cap_bset = 0;
387		j->flags.capbset_drop = 0;
388	}
389	j->caps = capmask;
390	j->flags.use_caps = 1;
391}
392
393void API minijail_capbset_drop(struct minijail *j, uint64_t capmask)
394{
395	if (j->flags.use_caps) {
396		/*
397		 * 'minijail_use_caps' will have already configured a capability
398		 * bounding set matching the (permitted|inheritable|effective)
399		 * sets. Abort if the user tries to configure a separate
400		 * bounding set. 'minijail_capbset_drop' and 'minijail_use_caps'
401		 * are mutually exclusive.
402		 */
403		die("runtime capabilities already configured, can't drop "
404		    "bounding set separately");
405	}
406	j->cap_bset = capmask;
407	j->flags.capbset_drop = 1;
408}
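/*
 * Editor's note: an illustrative sketch (not part of upstream) of how a caller
 * might build the |capmask| arguments above. Capabilities are passed as a
 * 64-bit mask indexed by the CAP_* constants from <linux/capability.h>, the
 * same "one << i" convention used by drop_caps()/drop_capbset() below.
 *
 *   uint64_t mask = (UINT64_C(1) << CAP_NET_BIND_SERVICE) |
 *                   (UINT64_C(1) << CAP_NET_RAW);
 *   minijail_use_caps(j, mask);      // keep only these at runtime
 *   // ...or, mutually exclusive with the above:
 *   minijail_capbset_drop(j, mask);  // keep only these in the bounding set
 */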
409
410void API minijail_reset_signal_mask(struct minijail *j)
411{
412	j->flags.reset_signal_mask = 1;
413}
414
415void API minijail_namespace_vfs(struct minijail *j)
416{
417	j->flags.vfs = 1;
418}
419
420void API minijail_namespace_enter_vfs(struct minijail *j, const char *ns_path)
421{
422	int ns_fd = open(ns_path, O_RDONLY | O_CLOEXEC);
423	if (ns_fd < 0) {
424		pdie("failed to open namespace '%s'", ns_path);
425	}
426	j->mountns_fd = ns_fd;
427	j->flags.enter_vfs = 1;
428}
429
430void API minijail_skip_remount_private(struct minijail *j)
431{
432	j->flags.skip_remount_private = 1;
433}
434
435void API minijail_namespace_pids(struct minijail *j)
436{
437	j->flags.vfs = 1;
438	j->flags.remount_proc_ro = 1;
439	j->flags.pids = 1;
440	j->flags.do_init = 1;
441}
442
443void API minijail_namespace_ipc(struct minijail *j)
444{
445	j->flags.ipc = 1;
446}
447
448void API minijail_namespace_net(struct minijail *j)
449{
450	j->flags.net = 1;
451}
452
453void API minijail_namespace_enter_net(struct minijail *j, const char *ns_path)
454{
455	int ns_fd = open(ns_path, O_RDONLY | O_CLOEXEC);
456	if (ns_fd < 0) {
457		pdie("failed to open namespace '%s'", ns_path);
458	}
459	j->netns_fd = ns_fd;
460	j->flags.enter_net = 1;
461}
462
463void API minijail_namespace_cgroups(struct minijail *j)
464{
465	j->flags.ns_cgroups = 1;
466}
467
468void API minijail_remount_proc_readonly(struct minijail *j)
469{
470	j->flags.vfs = 1;
471	j->flags.remount_proc_ro = 1;
472}
473
474void API minijail_namespace_user(struct minijail *j)
475{
476	j->flags.userns = 1;
477}
478
479int API minijail_uidmap(struct minijail *j, const char *uidmap)
480{
481	j->uidmap = strdup(uidmap);
482	if (!j->uidmap)
483		return -ENOMEM;
484	char *ch;
485	for (ch = j->uidmap; *ch; ch++) {
486		if (*ch == ',')
487			*ch = '\n';
488	}
489	return 0;
490}
491
492int API minijail_gidmap(struct minijail *j, const char *gidmap)
493{
494	j->gidmap = strdup(gidmap);
495	if (!j->gidmap)
496		return -ENOMEM;
497	char *ch;
498	for (ch = j->gidmap; *ch; ch++) {
499		if (*ch == ',')
500			*ch = '\n';
501	}
502	return 0;
503}
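/*
 * Editor's note: illustrative sketch (not part of upstream). The |uidmap| and
 * |gidmap| strings use the kernel's /proc/<pid>/{uid,gid}_map format,
 * "<id-inside-ns> <id-outside-ns> <count>", with ',' accepted here in place of
 * newlines (see the loops above and write_ugid_mappings() below).
 *
 *   minijail_namespace_user(j);
 *   minijail_uidmap(j, "0 1000 1");            // uid 0 in the jail is uid 1000 outside
 *   minijail_gidmap(j, "0 1000 1,100 2000 1"); // two ranges, comma-separated
 */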
504
505void API minijail_inherit_usergroups(struct minijail *j)
506{
507	j->flags.usergroups = 1;
508}
509
510void API minijail_run_as_init(struct minijail *j)
511{
512	/*
513	 * Since the jailed program will become 'init' in the new PID namespace,
514	 * Minijail does not need to fork an 'init' process.
515	 */
516	j->flags.do_init = 0;
517}
518
519int API minijail_enter_chroot(struct minijail *j, const char *dir)
520{
521	if (j->chrootdir)
522		return -EINVAL;
523	j->chrootdir = strdup(dir);
524	if (!j->chrootdir)
525		return -ENOMEM;
526	j->flags.chroot = 1;
527	return 0;
528}
529
530int API minijail_enter_pivot_root(struct minijail *j, const char *dir)
531{
532	if (j->chrootdir)
533		return -EINVAL;
534	j->chrootdir = strdup(dir);
535	if (!j->chrootdir)
536		return -ENOMEM;
537	j->flags.pivot_root = 1;
538	return 0;
539}
540
541static char *append_external_path(const char *external_path,
542				  const char *path_inside_chroot)
543{
544	char *path;
545	size_t pathlen;
546
547	/* One extra char for '/' and one for '\0', hence + 2. */
548	pathlen = strlen(path_inside_chroot) + strlen(external_path) + 2;
549	path = malloc(pathlen);
550	snprintf(path, pathlen, "%s/%s", external_path, path_inside_chroot);
551
552	return path;
553}
554
555char API *minijail_get_original_path(struct minijail *j,
556				     const char *path_inside_chroot)
557{
558	struct mountpoint *b;
559
560	b = j->mounts_head;
561	while (b) {
562		/*
563		 * If |path_inside_chroot| is the exact destination of a
564		 * mount, then the original path is exactly the source of
565		 * the mount.
566		 *  For example, given "-b /some/path/exe,/chroot/path/exe",
567		 *    the mount source is /some/path/exe and the mount dest is
568		 *    /chroot/path/exe. When asked for the original path of
569		 *    "/chroot/path/exe", the source of that mount,
570		 *    "/some/path/exe", is what should be returned.
571		 */
572		if (!strcmp(b->dest, path_inside_chroot))
573			return strdup(b->src);
574
575		/*
576		 * If |path_inside_chroot| is within the destination path of a
577		 * mount, take the suffix of the chroot path relative to the
578		 * mount destination path, and append it to the mount source
579		 * path.
580		 */
581		if (!strncmp(b->dest, path_inside_chroot, strlen(b->dest))) {
582			const char *relative_path =
583				path_inside_chroot + strlen(b->dest);
584			return append_external_path(b->src, relative_path);
585		}
586		b = b->next;
587	}
588
589	/* If there is a chroot path, append |path_inside_chroot| to that. */
590	if (j->chrootdir)
591		return append_external_path(j->chrootdir, path_inside_chroot);
592
593	/* No chroot, so the path outside is the same as it is inside. */
594	return strdup(path_inside_chroot);
595}
596
597void API minijail_mount_tmp(struct minijail *j)
598{
599	j->flags.mount_tmp = 1;
600}
601
602int API minijail_write_pid_file(struct minijail *j, const char *path)
603{
604	j->pid_file_path = strdup(path);
605	if (!j->pid_file_path)
606		return -ENOMEM;
607	j->flags.pid_file = 1;
608	return 0;
609}
610
611int API minijail_add_to_cgroup(struct minijail *j, const char *path)
612{
613	if (j->cgroup_count >= MAX_CGROUPS)
614		return -ENOMEM;
615	j->cgroups[j->cgroup_count] = strdup(path);
616	if (!j->cgroups[j->cgroup_count])
617		return -ENOMEM;
618	j->cgroup_count++;
619	j->flags.cgroups = 1;
620	return 0;
621}
622
623int API minijail_mount_with_data(struct minijail *j, const char *src,
624				 const char *dest, const char *type,
625				 unsigned long flags, const char *data)
626{
627	struct mountpoint *m;
628
629	if (*dest != '/')
630		return -EINVAL;
631	m = calloc(1, sizeof(*m));
632	if (!m)
633		return -ENOMEM;
634	m->dest = strdup(dest);
635	if (!m->dest)
636		goto error;
637	m->src = strdup(src);
638	if (!m->src)
639		goto error;
640	m->type = strdup(type);
641	if (!m->type)
642		goto error;
643	if (data) {
644		m->data = strdup(data);
645		if (!m->data)
646			goto error;
647		m->has_data = 1;
648	}
649	m->flags = flags;
650
651	info("mount %s -> %s type '%s'", src, dest, type);
652
653	/*
654	 * Force vfs namespacing so the mounts don't leak out into the
655	 * containing vfs namespace.
656	 */
657	minijail_namespace_vfs(j);
658
659	if (j->mounts_tail)
660		j->mounts_tail->next = m;
661	else
662		j->mounts_head = m;
663	j->mounts_tail = m;
664	j->mounts_count++;
665
666	return 0;
667
668error:
669	free(m->type);
670	free(m->src);
671	free(m->dest);
672	free(m);
673	return -ENOMEM;
674}
675
676int API minijail_mount(struct minijail *j, const char *src, const char *dest,
677		       const char *type, unsigned long flags)
678{
679	return minijail_mount_with_data(j, src, dest, type, flags, NULL);
680}
681
682int API minijail_bind(struct minijail *j, const char *src, const char *dest,
683		      int writeable)
684{
685	unsigned long flags = MS_BIND;
686
687	if (!writeable)
688		flags |= MS_RDONLY;
689
690	return minijail_mount(j, src, dest, "", flags);
691}
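/*
 * Editor's note: illustrative sketch (not part of upstream). A read-only bind
 * mount is requested as MS_BIND|MS_RDONLY here; mount_one() below performs the
 * bind first and then remounts it read-only, since 'bind' and 'ro' cannot both
 * be applied in the initial mount.
 *
 *   minijail_enter_chroot(j, "/path/to/rootfs");
 *   minijail_bind(j, "/dev/urandom", "/dev/urandom", 0);  // writeable=0 -> read-only
 */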
692
693void API minijail_parse_seccomp_filters(struct minijail *j, const char *path)
694{
695	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL)) {
696		if ((errno == EINVAL) && can_softfail()) {
697			warn("not loading seccomp filter,"
698			     " seccomp not supported");
699			j->flags.seccomp_filter = 0;
700			j->flags.log_seccomp_filter = 0;
701			j->filter_len = 0;
702			j->filter_prog = NULL;
703			j->flags.no_new_privs = 0;
704		}
705	}
706	FILE *file = fopen(path, "r");
707	if (!file) {
708		pdie("failed to open seccomp filter file '%s'", path);
709	}
710
711	struct sock_fprog *fprog = malloc(sizeof(struct sock_fprog));
712	if (compile_filter(file, fprog, j->flags.log_seccomp_filter)) {
713		die("failed to compile seccomp filter BPF program in '%s'",
714		    path);
715	}
716
717	j->filter_len = fprog->len;
718	j->filter_prog = fprog;
719
720	fclose(file);
721}
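/*
 * Editor's note: illustrative sketch (not part of upstream) of the usual
 * seccomp-bpf setup order with this API; the policy path below is made up.
 * Combining no_new_privs with the filter lets the policy omit the
 * privilege-dropping syscalls, as explained in set_seccomp_filter() below.
 *
 *   minijail_no_new_privs(j);
 *   minijail_use_seccomp_filter(j);
 *   minijail_parse_seccomp_filters(j, "/usr/share/policy/mydaemon.policy");
 */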
722
723int API minijail_use_alt_syscall(struct minijail *j, const char *table)
724{
725	j->alt_syscall_table = strdup(table);
726	if (!j->alt_syscall_table)
727		return -ENOMEM;
728	j->flags.alt_syscall = 1;
729	return 0;
730}
731
732struct marshal_state {
733	size_t available;
734	size_t total;
735	char *buf;
736};
737
738void marshal_state_init(struct marshal_state *state, char *buf,
739			size_t available)
740{
741	state->available = available;
742	state->buf = buf;
743	state->total = 0;
744}
745
746void marshal_append(struct marshal_state *state, void *src, size_t length)
747{
748	size_t copy_len = MIN(state->available, length);
749
750	/* Up to |available| will be written. */
751	if (copy_len) {
752		memcpy(state->buf, src, copy_len);
753		state->buf += copy_len;
754		state->available -= copy_len;
755	}
756	/* |total| will contain the expected length. */
757	state->total += length;
758}
759
760static void minijail_marshal_mount(struct marshal_state *state,
761				   const struct mountpoint *m)
762{
763	marshal_append(state, m->src, strlen(m->src) + 1);
764	marshal_append(state, m->dest, strlen(m->dest) + 1);
765	marshal_append(state, m->type, strlen(m->type) + 1);
766	marshal_append(state, (char *)&m->has_data, sizeof(m->has_data));
767	if (m->has_data)
768		marshal_append(state, m->data, strlen(m->data) + 1);
769	marshal_append(state, (char *)&m->flags, sizeof(m->flags));
770}
771
772void minijail_marshal_helper(struct marshal_state *state,
773			     const struct minijail *j)
774{
775	struct mountpoint *m = NULL;
776	size_t i;
777
778	marshal_append(state, (char *)j, sizeof(*j));
779	if (j->user)
780		marshal_append(state, j->user, strlen(j->user) + 1);
781	if (j->suppl_gid_list) {
782		marshal_append(state, j->suppl_gid_list,
783			       j->suppl_gid_count * sizeof(gid_t));
784	}
785	if (j->chrootdir)
786		marshal_append(state, j->chrootdir, strlen(j->chrootdir) + 1);
787	if (j->alt_syscall_table) {
788		marshal_append(state, j->alt_syscall_table,
789			       strlen(j->alt_syscall_table) + 1);
790	}
791	if (j->flags.seccomp_filter && j->filter_prog) {
792		struct sock_fprog *fp = j->filter_prog;
793		marshal_append(state, (char *)fp->filter,
794			       fp->len * sizeof(struct sock_filter));
795	}
796	for (m = j->mounts_head; m; m = m->next) {
797		minijail_marshal_mount(state, m);
798	}
799	for (i = 0; i < j->cgroup_count; ++i)
800		marshal_append(state, j->cgroups[i], strlen(j->cgroups[i]) + 1);
801}
802
803size_t API minijail_size(const struct minijail *j)
804{
805	struct marshal_state state;
806	marshal_state_init(&state, NULL, 0);
807	minijail_marshal_helper(&state, j);
808	return state.total;
809}
810
811int minijail_marshal(const struct minijail *j, char *buf, size_t available)
812{
813	struct marshal_state state;
814	marshal_state_init(&state, buf, available);
815	minijail_marshal_helper(&state, j);
816	return (state.total > available);
817}
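/*
 * Editor's note: illustrative sketch (not part of upstream) of the two-pass
 * marshalling pattern: the first pass with a NULL buffer only accumulates
 * |total|, the second pass fills a buffer of exactly that size. This is what
 * minijail_to_fd() below does before writing the blob to the child.
 *
 *   size_t sz = minijail_size(j);
 *   char *buf = malloc(sz);
 *   if (buf && !minijail_marshal(j, buf, sz)) {
 *           // |buf| now holds the struct followed by strings, filter, mounts, ...
 *   }
 *   free(buf);
 */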
818
819/*
820 * consumebytes: consumes @length bytes from a buffer @buf of length @buflength
821 * @length    Number of bytes to consume
822 * @buf       Buffer to consume from
823 * @buflength Size of @buf
824 *
825 * Returns a pointer to the base of the bytes, or NULL for errors.
826 */
827void *consumebytes(size_t length, char **buf, size_t *buflength)
828{
829	char *p = *buf;
830	if (length > *buflength)
831		return NULL;
832	*buf += length;
833	*buflength -= length;
834	return p;
835}
836
837/*
838 * consumestr: consumes a C string from a buffer @buf of length @buflength
839 * @buf       Buffer to consume from
840 * @buflength Size of @buf
841 *
842 * Returns a pointer to the base of the string, or NULL for errors.
843 */
844char *consumestr(char **buf, size_t *buflength)
845{
846	size_t len = strnlen(*buf, *buflength);
847	if (len == *buflength)
848		/* There's no null-terminator. */
849		return NULL;
850	return consumebytes(len + 1, buf, buflength);
851}
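/*
 * Editor's note: illustrative sketch (not part of upstream). The unmarshalled
 * blob is walked with these helpers: raw structs come out via consumebytes()
 * and NUL-terminated strings via consumestr(), each call advancing the buffer
 * pointer and shrinking the remaining length so truncation shows up as NULL.
 *
 *   char *user = consumestr(&serialized, &length);  // NUL-terminated string
 *   unsigned long *flags = consumebytes(sizeof(*flags), &serialized, &length);
 */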
852
853int minijail_unmarshal(struct minijail *j, char *serialized, size_t length)
854{
855	size_t i;
856	size_t count;
857	int ret = -EINVAL;
858
859	if (length < sizeof(*j))
860		goto out;
861	memcpy((void *)j, serialized, sizeof(*j));
862	serialized += sizeof(*j);
863	length -= sizeof(*j);
864
865	/* Clear potentially stale pointers that are not used as presence flags below. */
866	j->pid_file_path = NULL;
867	j->uidmap = NULL;
868	j->gidmap = NULL;
869	j->mounts_head = NULL;
870	j->mounts_tail = NULL;
871	j->filter_prog = NULL;
872
873	if (j->user) {		/* stale pointer */
874		char *user = consumestr(&serialized, &length);
875		if (!user)
876			goto clear_pointers;
877		j->user = strdup(user);
878		if (!j->user)
879			goto clear_pointers;
880	}
881
882	if (j->suppl_gid_list) {	/* stale pointer */
883		if (j->suppl_gid_count > NGROUPS_MAX) {
884			goto bad_gid_list;
885		}
886		size_t gid_list_size = j->suppl_gid_count * sizeof(gid_t);
887		void *gid_list_bytes =
888		    consumebytes(gid_list_size, &serialized, &length);
889		if (!gid_list_bytes)
890			goto bad_gid_list;
891
892		j->suppl_gid_list = calloc(j->suppl_gid_count, sizeof(gid_t));
893		if (!j->suppl_gid_list)
894			goto bad_gid_list;
895
896		memcpy(j->suppl_gid_list, gid_list_bytes, gid_list_size);
897	}
898
899	if (j->chrootdir) {	/* stale pointer */
900		char *chrootdir = consumestr(&serialized, &length);
901		if (!chrootdir)
902			goto bad_chrootdir;
903		j->chrootdir = strdup(chrootdir);
904		if (!j->chrootdir)
905			goto bad_chrootdir;
906	}
907
908	if (j->alt_syscall_table) {	/* stale pointer */
909		char *alt_syscall_table = consumestr(&serialized, &length);
910		if (!alt_syscall_table)
911			goto bad_syscall_table;
912		j->alt_syscall_table = strdup(alt_syscall_table);
913		if (!j->alt_syscall_table)
914			goto bad_syscall_table;
915	}
916
917	if (j->flags.seccomp_filter && j->filter_len > 0) {
918		size_t ninstrs = j->filter_len;
919		if (ninstrs > (SIZE_MAX / sizeof(struct sock_filter)) ||
920		    ninstrs > USHRT_MAX)
921			goto bad_filters;
922
923		size_t program_len = ninstrs * sizeof(struct sock_filter);
924		void *program = consumebytes(program_len, &serialized, &length);
925		if (!program)
926			goto bad_filters;
927
928		j->filter_prog = malloc(sizeof(struct sock_fprog));
929		if (!j->filter_prog)
930			goto bad_filters;
931
932		j->filter_prog->len = ninstrs;
933		j->filter_prog->filter = malloc(program_len);
934		if (!j->filter_prog->filter)
935			goto bad_filter_prog_instrs;
936
937		memcpy(j->filter_prog->filter, program, program_len);
938	}
939
940	count = j->mounts_count;
941	j->mounts_count = 0;
942	for (i = 0; i < count; ++i) {
943		unsigned long *flags;
944		int *has_data;
945		const char *dest;
946		const char *type;
947		const char *data = NULL;
948		const char *src = consumestr(&serialized, &length);
949		if (!src)
950			goto bad_mounts;
951		dest = consumestr(&serialized, &length);
952		if (!dest)
953			goto bad_mounts;
954		type = consumestr(&serialized, &length);
955		if (!type)
956			goto bad_mounts;
957		has_data = consumebytes(sizeof(*has_data), &serialized,
958					&length);
959		if (!has_data)
960			goto bad_mounts;
961		if (*has_data) {
962			data = consumestr(&serialized, &length);
963			if (!data)
964				goto bad_mounts;
965		}
966		flags = consumebytes(sizeof(*flags), &serialized, &length);
967		if (!flags)
968			goto bad_mounts;
969		if (minijail_mount_with_data(j, src, dest, type, *flags, data))
970			goto bad_mounts;
971	}
972
973	count = j->cgroup_count;
974	j->cgroup_count = 0;
975	for (i = 0; i < count; ++i) {
976		char *cgroup = consumestr(&serialized, &length);
977		if (!cgroup)
978			goto bad_cgroups;
979		j->cgroups[i] = strdup(cgroup);
980		if (!j->cgroups[i])
981			goto bad_cgroups;
982		++j->cgroup_count;
983	}
984
985	return 0;
986
987bad_cgroups:
988	while (j->mounts_head) {
989		struct mountpoint *m = j->mounts_head;
990		j->mounts_head = j->mounts_head->next;
991		free(m->data);
992		free(m->type);
993		free(m->dest);
994		free(m->src);
995		free(m);
996	}
997	for (i = 0; i < j->cgroup_count; ++i)
998		free(j->cgroups[i]);
999bad_mounts:
1000	if (j->flags.seccomp_filter && j->filter_len > 0) {
1001		free(j->filter_prog->filter);
1002		free(j->filter_prog);
1003	}
1004bad_filter_prog_instrs:
1005	if (j->filter_prog)
1006		free(j->filter_prog);
1007bad_filters:
1008	if (j->alt_syscall_table)
1009		free(j->alt_syscall_table);
1010bad_syscall_table:
1011	if (j->chrootdir)
1012		free(j->chrootdir);
1013bad_chrootdir:
1014	if (j->suppl_gid_list)
1015		free(j->suppl_gid_list);
1016bad_gid_list:
1017	if (j->user)
1018		free(j->user);
1019clear_pointers:
1020	j->user = NULL;
1021	j->suppl_gid_list = NULL;
1022	j->chrootdir = NULL;
1023	j->alt_syscall_table = NULL;
1024	j->cgroup_count = 0;
1025out:
1026	return ret;
1027}
1028
1029static void write_ugid_mappings(const struct minijail *j)
1030{
1031	int fd, ret, len;
1032	size_t sz;
1033	char fname[32];
1034
1035	sz = sizeof(fname);
1036	if (j->uidmap) {
1037		ret = snprintf(fname, sz, "/proc/%d/uid_map", j->initpid);
1038		if (ret < 0 || (size_t)ret >= sz)
1039			die("failed to write file name of uid_map");
1040		fd = open(fname, O_WRONLY | O_CLOEXEC);
1041		if (fd < 0)
1042			pdie("failed to open '%s'", fname);
1043		len = strlen(j->uidmap);
1044		if (write(fd, j->uidmap, len) < len)
1045			die("failed to set uid_map");
1046		close(fd);
1047	}
1048	if (j->gidmap) {
1049		ret = snprintf(fname, sz, "/proc/%d/gid_map", j->initpid);
1050		if (ret < 0 || (size_t)ret >= sz)
1051			die("failed to write file name of gid_map");
1052		fd = open(fname, O_WRONLY | O_CLOEXEC);
1053		if (fd < 0)
1054			pdie("failed to open '%s'", fname);
1055		len = strlen(j->gidmap);
1056		if (write(fd, j->gidmap, len) < len)
1057			die("failed to set gid_map");
1058		close(fd);
1059	}
1060}
1061
1062static void parent_setup_complete(int *pipe_fds)
1063{
1064	close(pipe_fds[0]);
1065	close(pipe_fds[1]);
1066}
1067
1068/*
1069 * wait_for_parent_setup: Called by the child process to wait for any
1070 * further parent-side setup to complete before continuing.
1071 */
1072static void wait_for_parent_setup(int *pipe_fds)
1073{
1074	char buf;
1075
1076	close(pipe_fds[1]);
1077
1078	/* Wait for parent to complete setup and close the pipe. */
1079	if (read(pipe_fds[0], &buf, 1) != 0)
1080		die("failed to sync with parent");
1081	close(pipe_fds[0]);
1082}
1083
1084static void enter_user_namespace(const struct minijail *j)
1085{
1086	if (j->uidmap && setresuid(0, 0, 0))
1087		pdie("setresuid");
1088	if (j->gidmap && setresgid(0, 0, 0))
1089		pdie("setresgid");
1090}
1091
1092/*
1093 * Make sure the mount target exists. Create it if needed and possible.
1094 */
1095static int setup_mount_destination(const char *source, const char *dest,
1096				   uid_t uid, uid_t gid)
1097{
1098	int rc;
1099	struct stat st_buf;
1100
1101	rc = stat(dest, &st_buf);
1102	if (rc == 0) /* destination exists */
1103		return 0;
1104
1105	/*
1106	 * Try to create the destination.
1107	 * Either make a directory or touch a file depending on the source type.
1108	 * If the source doesn't exist, assume it is a filesystem type such as
1109	 * "tmpfs" and create a directory to mount it on.
1110	 */
1111	rc = stat(source, &st_buf);
1112	if (rc || S_ISDIR(st_buf.st_mode) || S_ISBLK(st_buf.st_mode)) {
1113		if (mkdir(dest, 0700))
1114			return -errno;
1115	} else {
1116		int fd = open(dest, O_RDWR | O_CREAT, 0700);
1117		if (fd < 0)
1118			return -errno;
1119		close(fd);
1120	}
1121	return chown(dest, uid, gid);
1122}
1123
1124/*
1125 * mount_one: Applies mounts from @m for @j, recursing as needed.
1126 * @j Minijail these mounts are for
1127 * @m Head of list of mounts
1128 *
1129 * Returns 0 for success.
1130 */
1131static int mount_one(const struct minijail *j, struct mountpoint *m)
1132{
1133	int ret;
1134	char *dest;
1135	int remount_ro = 0;
1136
1137	/* |dest| has a leading "/". */
1138	if (asprintf(&dest, "%s%s", j->chrootdir, m->dest) < 0)
1139		return -ENOMEM;
1140
1141	if (setup_mount_destination(m->src, dest, j->uid, j->gid))
1142		pdie("creating mount target '%s' failed", dest);
1143
1144	/*
1145	 * R/O bind mounts have to be remounted since 'bind' and 'ro'
1146	 * can't both be specified in the original bind mount.
1147	 * Remount R/O after the initial mount.
1148	 */
1149	if ((m->flags & MS_BIND) && (m->flags & MS_RDONLY)) {
1150		remount_ro = 1;
1151		m->flags &= ~MS_RDONLY;
1152	}
1153
1154	ret = mount(m->src, dest, m->type, m->flags, m->data);
1155	if (ret)
1156		pdie("mount: %s -> %s", m->src, dest);
1157
1158	if (remount_ro) {
1159		m->flags |= MS_RDONLY;
1160		ret = mount(m->src, dest, NULL,
1161			    m->flags | MS_REMOUNT, m->data);
1162		if (ret)
1163			pdie("bind ro: %s -> %s", m->src, dest);
1164	}
1165
1166	free(dest);
1167	if (m->next)
1168		return mount_one(j, m->next);
1169	return ret;
1170}
1171
1172int enter_chroot(const struct minijail *j)
1173{
1174	int ret;
1175
1176	if (j->mounts_head && (ret = mount_one(j, j->mounts_head)))
1177		return ret;
1178
1179	if (chroot(j->chrootdir))
1180		return -errno;
1181
1182	if (chdir("/"))
1183		return -errno;
1184
1185	return 0;
1186}
1187
1188int enter_pivot_root(const struct minijail *j)
1189{
1190	int ret, oldroot, newroot;
1191
1192	if (j->mounts_head && (ret = mount_one(j, j->mounts_head)))
1193		return ret;
1194
1195	/*
1196	 * Keep the fd for both old and new root.
1197	 * It will be used in fchdir(2) later.
1198	 */
1199	oldroot = open("/", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
1200	if (oldroot < 0)
1201		pdie("failed to open / for fchdir");
1202	newroot = open(j->chrootdir, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
1203	if (newroot < 0)
1204		pdie("failed to open %s for fchdir", j->chrootdir);
1205
1206	/*
1207	 * To ensure j->chrootdir is the root of a filesystem,
1208	 * do a self bind mount.
1209	 */
1210	if (mount(j->chrootdir, j->chrootdir, "bind", MS_BIND | MS_REC, ""))
1211		pdie("failed to bind mount '%s'", j->chrootdir);
1212	if (chdir(j->chrootdir))
1213		return -errno;
1214	if (syscall(SYS_pivot_root, ".", "."))
1215		pdie("pivot_root");
1216
1217	/*
1218	 * Now the old root is mounted on top of the new root. Use fchdir(2) to
1219	 * change to the old root and unmount it.
1220	 */
1221	if (fchdir(oldroot))
1222		pdie("failed to fchdir to old /");
1223
1224	/*
1225	 * If j->flags.skip_remount_private was enabled for minijail_enter(),
1226	 * there could be a shared mount point under |oldroot|. In that case,
1227	 * mounts under this shared mount point will be unmounted below, and
1228	 * this unmounting will propagate to the original mount namespace
1229	 * (because the mount point is shared). To prevent this unexpected
1230	 * unmounting, remove these mounts from their peer groups by recursively
1231	 * remounting them as MS_PRIVATE.
1232	 */
1233	if (mount(NULL, ".", NULL, MS_REC | MS_PRIVATE, NULL))
1234		pdie("failed to mount(/, private) before umount(/)");
1235	/* The old root might be busy, so use lazy unmount. */
1236	if (umount2(".", MNT_DETACH))
1237		pdie("umount(/)");
1238	/* Change back to the new root. */
1239	if (fchdir(newroot))
1240		return -errno;
1241	if (close(oldroot))
1242		return -errno;
1243	if (close(newroot))
1244		return -errno;
1245	if (chroot("/"))
1246		return -errno;
1247	/* Set correct CWD for getcwd(3). */
1248	if (chdir("/"))
1249		return -errno;
1250
1251	return 0;
1252}
1253
1254int mount_tmp(void)
1255{
1256	return mount("none", "/tmp", "tmpfs", 0, "size=64M,mode=777");
1257}
1258
1259int remount_proc_readonly(const struct minijail *j)
1260{
1261	const char *kProcPath = "/proc";
1262	const unsigned int kSafeFlags = MS_NODEV | MS_NOEXEC | MS_NOSUID;
1263	/*
1264	 * Right now, we're holding a reference to our parent's old mount of
1265	 * /proc in our namespace, which means using MS_REMOUNT here would
1266	 * mutate our parent's mount as well, even though we're in a VFS
1267	 * namespace (!). Instead, remove their mount from our namespace lazily
1268	 * (MNT_DETACH) and make our own.
1269	 */
1270	if (umount2(kProcPath, MNT_DETACH)) {
1271		/*
1272		 * If we are in a new user namespace, umount(2) will fail.
1273		 * See http://man7.org/linux/man-pages/man7/user_namespaces.7.html
1274		 */
1275		if (j->flags.userns) {
1276			info("umount(/proc, MNT_DETACH) failed, "
1277			     "this is expected when using user namespaces");
1278		} else {
1279			return -errno;
1280		}
1281	}
1282	if (mount("", kProcPath, "proc", kSafeFlags | MS_RDONLY, ""))
1283		return -errno;
1284	return 0;
1285}
1286
1287static void write_pid_to_path(pid_t pid, const char *path)
1288{
1289	FILE *fp = fopen(path, "w");
1290
1291	if (!fp)
1292		pdie("failed to open '%s'", path);
1293	if (fprintf(fp, "%d\n", (int)pid) < 0)
1294		pdie("fprintf(%s)", path);
1295	if (fclose(fp))
1296		pdie("fclose(%s)", path);
1297}
1298
1299static void write_pid_file(const struct minijail *j)
1300{
1301	write_pid_to_path(j->initpid, j->pid_file_path);
1302}
1303
1304static void add_to_cgroups(const struct minijail *j)
1305{
1306	size_t i;
1307
1308	for (i = 0; i < j->cgroup_count; ++i)
1309		write_pid_to_path(j->initpid, j->cgroups[i]);
1310}
1311
1312void drop_ugid(const struct minijail *j)
1313{
1314	if (j->flags.usergroups && j->flags.suppl_gids) {
1315		die("tried to inherit *and* set supplementary groups;"
1316		    " can only do one");
1317	}
1318
1319	if (j->flags.usergroups) {
1320		if (initgroups(j->user, j->usergid))
1321			pdie("initgroups");
1322	} else if (j->flags.suppl_gids) {
1323		if (setgroups(j->suppl_gid_count, j->suppl_gid_list)) {
1324			pdie("setgroups");
1325		}
1326	} else {
1327		/*
1328		 * Only attempt to clear supplementary groups if we are changing
1329		 * users.
1330		 */
1331		if ((j->uid || j->gid) && setgroups(0, NULL))
1332			pdie("setgroups");
1333	}
1334
1335	if (j->flags.gid && setresgid(j->gid, j->gid, j->gid))
1336		pdie("setresgid");
1337
1338	if (j->flags.uid && setresuid(j->uid, j->uid, j->uid))
1339		pdie("setresuid");
1340}
1341
1342/*
1343 * We specifically do not use cap_valid() as that only tells us the last
1344 * valid cap we were *compiled* against (i.e. what the version of kernel
1345 * headers says). If we run on a different kernel version, then it's not
1346 * uncommon for that to be less (if an older kernel) or more (if a newer
1347 * kernel).
1348 * Normally, we suck up the answer via /proc. On Android, not all processes are
1349 * guaranteed to be able to access '/proc/sys/kernel/cap_last_cap' so we
1350 * programmatically find the value by calling prctl(PR_CAPBSET_READ).
1351 */
1352static unsigned int get_last_valid_cap()
1353{
1354	unsigned int last_valid_cap = 0;
1355	if (is_android()) {
1356		for (; prctl(PR_CAPBSET_READ, last_valid_cap, 0, 0, 0) >= 0;
1357		     ++last_valid_cap);
1358
1359		/* |last_valid_cap| will be the first failing value. */
1360		if (last_valid_cap > 0) {
1361			last_valid_cap--;
1362		}
1363	} else {
1364		const char cap_file[] = "/proc/sys/kernel/cap_last_cap";
1365		FILE *fp = fopen(cap_file, "re");
1366		if (fscanf(fp, "%u", &last_valid_cap) != 1)
1367			pdie("fscanf(%s)", cap_file);
1368		fclose(fp);
1369	}
1370	return last_valid_cap;
1371}
1372
1373static void drop_capbset(uint64_t keep_mask, unsigned int last_valid_cap)
1374{
1375	const uint64_t one = 1;
1376	unsigned int i;
1377	for (i = 0; i < sizeof(keep_mask) * 8 && i <= last_valid_cap; ++i) {
1378		if (keep_mask & (one << i))
1379			continue;
1380		if (prctl(PR_CAPBSET_DROP, i))
1381			pdie("could not drop capability from bounding set");
1382	}
1383}
1384
1385void drop_caps(const struct minijail *j, unsigned int last_valid_cap)
1386{
1387	if (!j->flags.use_caps)
1388		return;
1389
1390	cap_t caps = cap_get_proc();
1391	cap_value_t flag[1];
1392	const uint64_t one = 1;
1393	unsigned int i;
1394	if (!caps)
1395		die("can't get process caps");
1396	if (cap_clear_flag(caps, CAP_INHERITABLE))
1397		die("can't clear inheritable caps");
1398	if (cap_clear_flag(caps, CAP_EFFECTIVE))
1399		die("can't clear effective caps");
1400	if (cap_clear_flag(caps, CAP_PERMITTED))
1401		die("can't clear permitted caps");
1402	for (i = 0; i < sizeof(j->caps) * 8 && i <= last_valid_cap; ++i) {
1403		/* Keep CAP_SETPCAP for dropping bounding set bits. */
1404		if (i != CAP_SETPCAP && !(j->caps & (one << i)))
1405			continue;
1406		flag[0] = i;
1407		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_SET))
1408			die("can't add effective cap");
1409		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_SET))
1410			die("can't add permitted cap");
1411		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_SET))
1412			die("can't add inheritable cap");
1413	}
1414	if (cap_set_proc(caps))
1415		die("can't apply initial cleaned capset");
1416
1417	/*
1418	 * Instead of dropping the bounding set first, do it here in case
1419	 * the caller had a more permissive bounding set which could
1420	 * have been used above to raise a capability that wasn't already
1421	 * present. This requires CAP_SETPCAP, so we raised/kept it above.
1422	 */
1423	drop_capbset(j->caps, last_valid_cap);
1424
1425	/* If CAP_SETPCAP wasn't specifically requested, now we remove it. */
1426	if ((j->caps & (one << CAP_SETPCAP)) == 0) {
1427		flag[0] = CAP_SETPCAP;
1428		if (cap_set_flag(caps, CAP_EFFECTIVE, 1, flag, CAP_CLEAR))
1429			die("can't clear effective cap");
1430		if (cap_set_flag(caps, CAP_PERMITTED, 1, flag, CAP_CLEAR))
1431			die("can't clear permitted cap");
1432		if (cap_set_flag(caps, CAP_INHERITABLE, 1, flag, CAP_CLEAR))
1433			die("can't clear inheritable cap");
1434	}
1435
1436	if (cap_set_proc(caps))
1437		die("can't apply final cleaned capset");
1438
1439	cap_free(caps);
1440}
1441
1442void set_seccomp_filter(const struct minijail *j)
1443{
1444	/*
1445	 * Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
1446	 * in the kernel source tree for an explanation of the parameters.
1447	 */
1448	if (j->flags.no_new_privs) {
1449		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
1450			pdie("prctl(PR_SET_NO_NEW_PRIVS)");
1451	}
1452
1453	/*
1454	 * Code running with ASan
1455	 * (https://github.com/google/sanitizers/wiki/AddressSanitizer)
1456	 * will make system calls not included in the syscall filter policy,
1457	 * which will likely crash the program. Skip setting seccomp filter in
1458	 * that case.
1459	 * 'running_with_asan()' has no inputs and is completely defined at
1460	 * build time, so this cannot be used by an attacker to skip setting
1461	 * seccomp filter.
1462	 */
1463	if (j->flags.seccomp_filter && running_with_asan()) {
1464		warn("running with ASan, not setting seccomp filter");
1465		return;
1466	}
1467
1468	/*
1469	 * If we're logging seccomp filter failures,
1470	 * install the SIGSYS handler first.
1471	 */
1472	if (j->flags.seccomp_filter && j->flags.log_seccomp_filter) {
1473		if (install_sigsys_handler())
1474			pdie("install SIGSYS handler");
1475		warn("logging seccomp filter failures");
1476	}
1477
1478	/*
1479	 * Install the syscall filter.
1480	 */
1481	if (j->flags.seccomp_filter) {
1482		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
1483			  j->filter_prog)) {
1484			if ((errno == EINVAL) && can_softfail()) {
1485				warn("seccomp not supported");
1486				return;
1487			}
1488			pdie("prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER)");
1489		}
1490	}
1491}
1492
1493void API minijail_enter(const struct minijail *j)
1494{
1495	/*
1496	 * If we're dropping caps, get the last valid cap from /proc now,
1497	 * since /proc can be unmounted before drop_caps() is called.
1498	 */
1499	unsigned int last_valid_cap = 0;
1500	if (j->flags.capbset_drop || j->flags.use_caps)
1501		last_valid_cap = get_last_valid_cap();
1502
1503	if (j->flags.pids)
1504		die("tried to enter a pid-namespaced jail;"
1505		    " try minijail_run()?");
1506
1507	if (j->flags.usergroups && !j->user)
1508		die("usergroup inheritance without username");
1509
1510	/*
1511	 * We can't recover from failures if we've dropped privileges partially,
1512	 * so we don't even try. If any of our operations fail, we abort() the
1513	 * entire process.
1514	 */
1515	if (j->flags.enter_vfs && setns(j->mountns_fd, CLONE_NEWNS))
1516		pdie("setns(CLONE_NEWNS)");
1517
1518	if (j->flags.vfs) {
1519		if (unshare(CLONE_NEWNS))
1520			pdie("unshare(vfs)");
1521		/*
1522		 * Unless asked not to, remount all filesystems as private.
1523		 * If they are shared, new bind mounts will creep out of our
1524		 * namespace.
1525		 * https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
1526		 */
1527		if (!j->flags.skip_remount_private) {
1528			if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL))
1529				pdie("mount(/, private)");
1530		}
1531	}
1532
1533	if (j->flags.ipc && unshare(CLONE_NEWIPC)) {
1534		pdie("unshare(ipc)");
1535	}
1536
1537	if (j->flags.enter_net) {
1538		if (setns(j->netns_fd, CLONE_NEWNET))
1539			pdie("setns(CLONE_NEWNET)");
1540	} else if (j->flags.net && unshare(CLONE_NEWNET)) {
1541		pdie("unshare(net)");
1542	}
1543
1544	if (j->flags.ns_cgroups && unshare(CLONE_NEWCGROUP))
1545		pdie("unshare(cgroups)");
1546
1547	if (j->flags.chroot && enter_chroot(j))
1548		pdie("chroot");
1549
1550	if (j->flags.pivot_root && enter_pivot_root(j))
1551		pdie("pivot_root");
1552
1553	if (j->flags.mount_tmp && mount_tmp())
1554		pdie("mount_tmp");
1555
1556	if (j->flags.remount_proc_ro && remount_proc_readonly(j))
1557		pdie("remount");
1558
1559	/*
1560	 * If we're only dropping capabilities from the bounding set, but not
1561	 * from the thread's (permitted|inheritable|effective) sets, do it now.
1562	 */
1563	if (j->flags.capbset_drop) {
1564		drop_capbset(j->cap_bset, last_valid_cap);
1565	}
1566
1567	if (j->flags.use_caps) {
1568		/*
1569		 * POSIX capabilities are a bit tricky. If we drop our
1570		 * capability to change uids, our attempt to use setuid()
1571		 * below will fail. Hang on to root caps across setuid(), then
1572		 * lock securebits.
1573		 */
1574		if (prctl(PR_SET_KEEPCAPS, 1))
1575			pdie("prctl(PR_SET_KEEPCAPS)");
1576
1577		/*
1578		 * Kernels 4.3+ define a new securebit
1579		 * (SECURE_NO_CAP_AMBIENT_RAISE), so using the SECURE_ALL_BITS
1580		 * and SECURE_ALL_LOCKS masks from newer kernel headers will
1581		 * return EPERM on older kernels. Detect this, and retry with
1582		 * the right mask for older (2.6.26-4.2) kernels.
1583		 */
1584		int securebits_ret = prctl(PR_SET_SECUREBITS,
1585					   SECURE_ALL_BITS | SECURE_ALL_LOCKS);
1586		if (securebits_ret < 0) {
1587			if (errno == EPERM) {
1588				/* Possibly running on kernel < 4.3. */
1589				securebits_ret = prctl(
1590				    PR_SET_SECUREBITS,
1591				    OLD_SECURE_ALL_BITS | OLD_SECURE_ALL_LOCKS);
1592			}
1593		}
1594		if (securebits_ret < 0)
1595			pdie("prctl(PR_SET_SECUREBITS)");
1596	}
1597
1598	if (j->flags.no_new_privs) {
1599		/*
1600		 * If we're setting no_new_privs, we can drop privileges
1601		 * before setting seccomp filter. This way filter policies
1602		 * don't need to allow privilege-dropping syscalls.
1603		 */
1604		drop_ugid(j);
1605		drop_caps(j, last_valid_cap);
1606		set_seccomp_filter(j);
1607	} else {
1608		/*
1609		 * If we're not setting no_new_privs,
1610		 * we need to set seccomp filter *before* dropping privileges.
1611		 * WARNING: this means that filter policies *must* allow
1612		 * setgroups()/setresgid()/setresuid() for dropping root and
1613		 * capget()/capset()/prctl() for dropping caps.
1614		 */
1615		set_seccomp_filter(j);
1616		drop_ugid(j);
1617		drop_caps(j, last_valid_cap);
1618	}
1619
1620	/*
1621	 * Select the specified alternate syscall table.  The table must not
1622	 * block prctl(2) if we're using seccomp as well.
1623	 */
1624	if (j->flags.alt_syscall) {
1625		if (prctl(PR_ALT_SYSCALL, 1, j->alt_syscall_table))
1626			pdie("prctl(PR_ALT_SYSCALL)");
1627	}
1628
1629	/*
1630	 * seccomp has to come last since it cuts off all the other
1631	 * privilege-dropping syscalls :)
1632	 */
1633	if (j->flags.seccomp && prctl(PR_SET_SECCOMP, 1)) {
1634		if ((errno == EINVAL) && can_softfail()) {
1635			warn("seccomp not supported");
1636			return;
1637		}
1638		pdie("prctl(PR_SET_SECCOMP)");
1639	}
1640}
1641
1642/* TODO(wad): will visibility affect this variable? */
1643static int init_exitstatus = 0;
1644
1645void init_term(int __attribute__ ((unused)) sig)
1646{
1647	_exit(init_exitstatus);
1648}
1649
1650int init(pid_t rootpid)
1651{
1652	pid_t pid;
1653	int status;
1654	/* So that we exit with the right status. */
1655	signal(SIGTERM, init_term);
1656	/* TODO(wad): self jail with seccomp filters here. */
1657	while ((pid = wait(&status)) > 0) {
1658		/*
1659		 * This loop will only end when either there are no processes
1660		 * left inside our pid namespace or we get a signal.
1661		 */
1662		if (pid == rootpid)
1663			init_exitstatus = status;
1664	}
1665	if (!WIFEXITED(init_exitstatus))
1666		_exit(MINIJAIL_ERR_INIT);
1667	_exit(WEXITSTATUS(init_exitstatus));
1668}
1669
1670int API minijail_from_fd(int fd, struct minijail *j)
1671{
1672	size_t sz = 0;
1673	size_t bytes = read(fd, &sz, sizeof(sz));
1674	char *buf;
1675	int r;
1676	if (sizeof(sz) != bytes)
1677		return -EINVAL;
1678	if (sz > USHRT_MAX)	/* arbitrary sanity check */
1679		return -E2BIG;
1680	buf = malloc(sz);
1681	if (!buf)
1682		return -ENOMEM;
1683	bytes = read(fd, buf, sz);
1684	if (bytes != sz) {
1685		free(buf);
1686		return -EINVAL;
1687	}
1688	r = minijail_unmarshal(j, buf, sz);
1689	free(buf);
1690	return r;
1691}
1692
1693int API minijail_to_fd(struct minijail *j, int fd)
1694{
1695	char *buf;
1696	size_t sz = minijail_size(j);
1697	ssize_t written;
1698	int r;
1699
1700	if (!sz)
1701		return -EINVAL;
1702	buf = malloc(sz);
1703	r = minijail_marshal(j, buf, sz);
1704	if (r) {
1705		free(buf);
1706		return r;
1707	}
1708	/* Sends [size][minijail]. */
1709	written = write(fd, &sz, sizeof(sz));
1710	if (written != sizeof(sz)) {
1711		free(buf);
1712		return -EFAULT;
1713	}
1714	written = write(fd, buf, sz);
1715	if (written < 0 || (size_t) written != sz) {
1716		free(buf);
1717		return -EFAULT;
1718	}
1719	free(buf);
1720	return 0;
1721}
1722
1723int setup_preload(void)
1724{
1725#if defined(__ANDROID__)
1726	/* Don't use LDPRELOAD on Brillo. */
1727	return 0;
1728#else
1729	char *oldenv = getenv(kLdPreloadEnvVar) ? : "";
1730	char *newenv = malloc(strlen(oldenv) + 2 + strlen(PRELOADPATH));
1731	if (!newenv)
1732		return -ENOMEM;
1733
1734	/* Only insert a separating space if we have something to separate... */
1735	sprintf(newenv, "%s%s%s", oldenv, strlen(oldenv) ? " " : "",
1736		PRELOADPATH);
1737
1738	/* setenv() makes a copy of the string we give it. */
1739	setenv(kLdPreloadEnvVar, newenv, 1);
1740	free(newenv);
1741	return 0;
1742#endif
1743}
1744
1745int setup_pipe(int fds[2])
1746{
1747	int r = pipe(fds);
1748	char fd_buf[11];
1749	if (r)
1750		return r;
1751	r = snprintf(fd_buf, sizeof(fd_buf), "%d", fds[0]);
1752	if (r <= 0)
1753		return -EINVAL;
1754	setenv(kFdEnvVar, fd_buf, 1);
1755	return 0;
1756}
1757
1758int setup_pipe_end(int fds[2], size_t index)
1759{
1760	if (index > 1)
1761		return -1;
1762
1763	close(fds[1 - index]);
1764	return fds[index];
1765}
1766
1767int setup_and_dupe_pipe_end(int fds[2], size_t index, int fd)
1768{
1769	if (index > 1)
1770		return -1;
1771
1772	close(fds[1 - index]);
1773	/* dup2(2) the corresponding end of the pipe into |fd|. */
1774	return dup2(fds[index], fd);
1775}
1776
1777int minijail_run_internal(struct minijail *j, const char *filename,
1778			  char *const argv[], pid_t *pchild_pid,
1779			  int *pstdin_fd, int *pstdout_fd, int *pstderr_fd,
1780			  int use_preload);
1781
1782int API minijail_run(struct minijail *j, const char *filename,
1783		     char *const argv[])
1784{
1785	return minijail_run_internal(j, filename, argv, NULL, NULL, NULL, NULL,
1786				     true);
1787}
1788
1789int API minijail_run_pid(struct minijail *j, const char *filename,
1790			 char *const argv[], pid_t *pchild_pid)
1791{
1792	return minijail_run_internal(j, filename, argv, pchild_pid,
1793				     NULL, NULL, NULL, true);
1794}
1795
1796int API minijail_run_pipe(struct minijail *j, const char *filename,
1797			  char *const argv[], int *pstdin_fd)
1798{
1799	return minijail_run_internal(j, filename, argv, NULL, pstdin_fd,
1800				     NULL, NULL, true);
1801}
1802
1803int API minijail_run_pid_pipes(struct minijail *j, const char *filename,
1804			       char *const argv[], pid_t *pchild_pid,
1805			       int *pstdin_fd, int *pstdout_fd, int *pstderr_fd)
1806{
1807	return minijail_run_internal(j, filename, argv, pchild_pid,
1808				     pstdin_fd, pstdout_fd, pstderr_fd, true);
1809}
1810
1811int API minijail_run_no_preload(struct minijail *j, const char *filename,
1812				char *const argv[])
1813{
1814	return minijail_run_internal(j, filename, argv, NULL, NULL, NULL, NULL,
1815				     false);
1816}
1817
1818int API minijail_run_pid_pipes_no_preload(struct minijail *j,
1819					  const char *filename,
1820					  char *const argv[],
1821					  pid_t *pchild_pid,
1822					  int *pstdin_fd, int *pstdout_fd,
1823					  int *pstderr_fd)
1824{
1825	return minijail_run_internal(j, filename, argv, pchild_pid,
1826				     pstdin_fd, pstdout_fd, pstderr_fd, false);
1827}
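/*
 * Editor's note: illustrative sketch (not part of upstream) tying the run API
 * together for a typical caller. minijail_wait()/minijail_destroy() are
 * declared in libminijail.h but outside this excerpt, so their use here is an
 * assumption; the policy path and user name are placeholders.
 *
 *   struct minijail *j = minijail_new();
 *   minijail_namespace_pids(j);
 *   minijail_change_user(j, "nobody");
 *   minijail_no_new_privs(j);
 *   minijail_use_seccomp_filter(j);
 *   minijail_parse_seccomp_filters(j, "/path/to/policy");
 *   char *const argv[] = { "/bin/true", NULL };
 *   minijail_run(j, argv[0], argv);
 *   // Wait on the jailed process (e.g. minijail_wait()) and clean up after.
 */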
1828
1829int minijail_run_internal(struct minijail *j, const char *filename,
1830			  char *const argv[], pid_t *pchild_pid,
1831			  int *pstdin_fd, int *pstdout_fd, int *pstderr_fd,
1832			  int use_preload)
1833{
1834	char *oldenv, *oldenv_copy = NULL;
1835	pid_t child_pid;
1836	int pipe_fds[2];
1837	int stdin_fds[2];
1838	int stdout_fds[2];
1839	int stderr_fds[2];
1840	int child_sync_pipe_fds[2];
1841	int sync_child = 0;
1842	int ret;
1843	/* We need to remember this across the minijail_preexec() call. */
1844	int pid_namespace = j->flags.pids;
1845	int do_init = j->flags.do_init;
1846
1847	if (use_preload) {
1848		oldenv = getenv(kLdPreloadEnvVar);
1849		if (oldenv) {
1850			oldenv_copy = strdup(oldenv);
1851			if (!oldenv_copy)
1852				return -ENOMEM;
1853		}
1854
1855		if (setup_preload())
1856			return -EFAULT;
1857	}
1858
1859	if (!use_preload) {
1860		if (j->flags.use_caps && j->caps != 0)
1861			die("non-empty capabilities are not supported without LD_PRELOAD");
1862	}
1863
1864	/*
1865	 * Make the process group ID of this process equal to its PID, so that
1866	 * both the Minijail process and the jailed process can be killed
1867	 * together.
1868	 * Don't fail on EPERM, since setpgid(0, 0) can only fail with EPERM
1869	 * when the process is already a process group leader.
1870	 */
1871	if (setpgid(0 /* use calling PID */, 0 /* make PGID = PID */)) {
1872		if (errno != EPERM) {
1873			pdie("setpgid(0, 0)");
1874		}
1875	}
1876
1877	if (use_preload) {
1878		/*
1879		 * Before we fork(2) and execve(2) the child process, we need
1880		 * to open a pipe(2) to send the minijail configuration over.
1881		 */
1882		if (setup_pipe(pipe_fds))
1883			return -EFAULT;
1884	}
1885
1886	/*
1887	 * If we want to write to the child process' standard input,
1888	 * create the pipe(2) now.
1889	 */
1890	if (pstdin_fd) {
1891		if (pipe(stdin_fds))
1892			return -EFAULT;
1893	}
1894
1895	/*
1896	 * If we want to read from the child process' standard output,
1897	 * create the pipe(2) now.
1898	 */
1899	if (pstdout_fd) {
1900		if (pipe(stdout_fds))
1901			return -EFAULT;
1902	}
1903
1904	/*
1905	 * If we want to read from the child process' standard error,
1906	 * create the pipe(2) now.
1907	 */
1908	if (pstderr_fd) {
1909		if (pipe(stderr_fds))
1910			return -EFAULT;
1911	}
1912
1913	/*
1914	 * If we want to set up a new uid/gid mapping in the user namespace,
1915	 * or if we need to add the child process to cgroups, create the pipe(2)
1916	 * to sync between parent and child.
1917	 */
1918	if (j->flags.userns || j->flags.cgroups) {
1919		sync_child = 1;
1920		if (pipe(child_sync_pipe_fds))
1921			return -EFAULT;
1922	}
1923
1924	/*
1925	 * Use sys_clone() if and only if we're creating a pid namespace.
1926	 *
1927	 * tl;dr: WARNING: do not mix pid namespaces and multithreading.
1928	 *
1929	 * In multithreaded programs, there are a bunch of locks inside libc,
1930	 * some of which may be held by other threads at the time that we call
1931	 * minijail_run_pid(). If we call fork(), glibc does its level best to
1932	 * ensure that we hold all of these locks before it calls clone()
1933	 * internally and drop them after clone() returns, but when we call
1934	 * sys_clone(2) directly, all that gets bypassed and we end up with a
1935	 * child address space where some of libc's important locks are held by
1936	 * other threads (which did not get cloned, and hence will never release
1937	 * those locks). This is okay so long as we call exec() immediately
1938	 * after, but a bunch of seemingly-innocent libc functions like setenv()
1939	 * take locks.
1940	 *
1941	 * Hence, only call sys_clone() if we need to, in order to get at pid
1942	 * namespacing. If we follow this path, the child's address space might
1943	 * have broken locks; you may only call functions that do not acquire
1944	 * any locks.
1945	 *
1946	 * Unfortunately, fork() acquires every lock it can get its hands on, as
1947	 * previously detailed, so this function is highly likely to deadlock
1948	 * later on (see "deadlock here") if we're multithreaded.
1949	 *
1950	 * We might hack around this by having the clone()d child (init of the
1951	 * pid namespace) return directly, rather than leaving the clone()d
1952	 * process hanging around to be init for the new namespace (and having
1953	 * its fork()ed child return in turn), but that process would be
1954	 * crippled with its libc locks potentially broken. We might try
1955	 * fork()ing in the parent before we clone() to ensure that we own all
1956	 * the locks, but then we have to have the forked child hanging around
1957	 * consuming resources (and possibly having file descriptors / shared
1958	 * memory regions / etc attached). We'd need to keep the child around to
1959	 * avoid having its children get reparented to init.
1960	 *
1961	 * TODO(ellyjones): figure out if the "forked child hanging around"
1962	 * problem is fixable or not. It would be nice if we worked in this
1963	 * case.
1964	 */
1965	if (pid_namespace) {
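		/*
		 * SIGCHLD in the low byte of the clone flags is the signal
		 * the kernel delivers to us when this child exits, so the
		 * parent can waitpid(2) on it just like a fork(2)ed child.
		 */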
1966		int clone_flags = CLONE_NEWPID | SIGCHLD;
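		/*
		 * Creating the user namespace in the same clone(2) call makes
		 * the new pid namespace owned by the new user namespace,
		 * which also allows unprivileged callers to set this up.
		 */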
1967		if (j->flags.userns)
1968			clone_flags |= CLONE_NEWUSER;
1969		child_pid = syscall(SYS_clone, clone_flags, NULL);
1970	} else {
1971		child_pid = fork();
1972	}
1973
1974	if (child_pid < 0) {
1975		if (use_preload) {
1976			free(oldenv_copy);
1977		}
1978		die("failed to fork child");
1979	}
1980
1981	if (child_pid) {
1982		if (use_preload) {
1983			/* Restore parent's LD_PRELOAD. */
1984			if (oldenv_copy) {
1985				setenv(kLdPreloadEnvVar, oldenv_copy, 1);
1986				free(oldenv_copy);
1987			} else {
1988				unsetenv(kLdPreloadEnvVar);
1989			}
1990			unsetenv(kFdEnvVar);
1991		}
1992
1993		j->initpid = child_pid;
1994
1995		if (j->flags.pid_file)
1996			write_pid_file(j);
1997
1998		if (j->flags.cgroups)
1999			add_to_cgroups(j);
2000
2001		if (j->flags.userns)
2002			write_ugid_mappings(j);
2003
2004		if (sync_child)
2005			parent_setup_complete(child_sync_pipe_fds);
2006
2007		if (use_preload) {
2008			/* Send marshalled minijail. */
2009			close(pipe_fds[0]);	/* read endpoint */
2010			ret = minijail_to_fd(j, pipe_fds[1]);
2011			close(pipe_fds[1]);	/* write endpoint */
2012			if (ret) {
2013				kill(j->initpid, SIGKILL);
2014				die("failed to send marshalled minijail");
2015			}
2016		}
2017
2018		if (pchild_pid)
2019			*pchild_pid = child_pid;
2020
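		/*
		 * The parent keeps exactly one end of each std stream pipe:
		 * setup_pipe_end() closes the end destined for the child and
		 * returns the fd we keep (the write end for stdin, the read
		 * ends for stdout and stderr).
		 */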
2021		/*
2022		 * If we want to write to the child process' standard input,
2023		 * set up the write end of the pipe.
2024		 */
2025		if (pstdin_fd)
2026			*pstdin_fd = setup_pipe_end(stdin_fds,
2027						    1 /* write end */);
2028
2029		/*
2030		 * If we want to read from the child process' standard output,
2031		 * set up the read end of the pipe.
2032		 */
2033		if (pstdout_fd)
2034			*pstdout_fd = setup_pipe_end(stdout_fds,
2035						     0 /* read end */);
2036
2037		/*
2038		 * If we want to read from the child process' standard error,
2039		 * set up the read end of the pipe.
2040		 */
2041		if (pstderr_fd)
2042			*pstderr_fd = setup_pipe_end(stderr_fds,
2043						     0 /* read end */);
2044
2045		return 0;
2046	}
2047	/* Child process. */
2048	free(oldenv_copy);
2049
2050	if (j->flags.reset_signal_mask) {
2051		sigset_t signal_mask;
2052		if (sigemptyset(&signal_mask) != 0)
2053			pdie("sigemptyset failed");
2054		if (sigprocmask(SIG_SETMASK, &signal_mask, NULL) != 0)
2055			pdie("sigprocmask failed");
2056	}
2057
2058	if (sync_child)
2059		wait_for_parent_setup(child_sync_pipe_fds);
2060
2061	if (j->flags.userns)
2062		enter_user_namespace(j);
2063
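	/*
	 * The child is the mirror image: setup_and_dupe_pipe_end() closes
	 * the parent's end of each pipe and dup2(2)s our end onto the
	 * corresponding standard fd, so the jailed program sees ordinary
	 * stdin/stdout/stderr.
	 */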
2064	/*
2065	 * If we want to write to the jailed process' standard input,
2066	 * set up the read end of the pipe.
2067	 */
2068	if (pstdin_fd) {
2069		if (setup_and_dupe_pipe_end(stdin_fds, 0 /* read end */,
2070					    STDIN_FILENO) < 0)
2071			die("failed to set up stdin pipe");
2072	}
2073
2074	/*
2075	 * If we want to read from the jailed process' standard output,
2076	 * set up the write end of the pipe.
2077	 */
2078	if (pstdout_fd) {
2079		if (setup_and_dupe_pipe_end(stdout_fds, 1 /* write end */,
2080					    STDOUT_FILENO) < 0)
2081			die("failed to set up stdout pipe");
2082	}
2083
2084	/*
2085	 * If we want to read from the jailed process' standard error,
2086	 * set up the write end of the pipe.
2087	 */
2088	if (pstderr_fd) {
2089		if (setup_and_dupe_pipe_end(stderr_fds, 1 /* write end */,
2090					    STDERR_FILENO) < 0)
2091			die("failed to set up stderr pipe");
2092	}
2093
2094	/* If running an init program, let it decide when/how to mount /proc. */
2095	if (pid_namespace && !do_init)
2096		j->flags.remount_proc_ro = 0;
2097
2098	if (use_preload) {
2099		/* Strip out flags that cannot be inherited across execve(2). */
2100		minijail_preexec(j);
2101	} else {
2102		/*
2103		 * If not using LD_PRELOAD, do all jailing before execve(2).
2104		 * Note that a new PID namespace can only be set up at clone(2)
2105		 * time (already handled above), so that flag is still cleared.
2106		 */
2107		j->flags.pids = 0;
2108	}
2109	/* Jail this process, then execve(2) the target. */
2110	minijail_enter(j);
2111
2112	if (pid_namespace && do_init) {
2113		/*
2114		 * pid namespace: this process will become init inside the new
2115		 * namespace. We don't want all programs we might exec to have
2116		 * to know how to be init. Normally (|do_init == 1|) we fork off
2117		 * a child to actually run the program. If |do_init == 0|, we
2118		 * let the program keep pid 1 and be init.
2119		 *
2120		 * If we're multithreaded, we'll probably deadlock here. See
2121		 * WARNING above.
2122		 */
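		/*
		 * After this fork(2): the parent stays behind as pid 1 and
		 * calls init() to reap children (it never returns here); the
		 * child falls through to the execve(2) below.
		 */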
2123		child_pid = fork();
2124		if (child_pid < 0)
2125			_exit(child_pid);
2126		else if (child_pid > 0)
2127			init(child_pid);	/* never returns */
2128	}
2129
2130	/*
2131	 * If we aren't pid-namespaced, or the jailed program asked to be init:
2132	 *   calling process
2133	 *   -> execve()-ing process
2134	 * If we are:
2135	 *   calling process
2136	 *   -> init()-ing process
2137	 *      -> execve()-ing process
2138	 */
2139	ret = execve(filename, argv, environ);
2140	if (ret == -1) {
2141		pwarn("execve(%s) failed", filename);
2142	}
2143	_exit(ret);
2144}
2145
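/*
 * minijail_kill: sends SIGTERM to the jail's init process and reaps it.
 * Returns -errno if kill(2) or waitpid(2) fails; otherwise returns the
 * raw wait status from waitpid(2) (not a decoded exit code).
 */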
2146int API minijail_kill(struct minijail *j)
2147{
2148	int st;
2149	if (kill(j->initpid, SIGTERM))
2150		return -errno;
2151	if (waitpid(j->initpid, &st, 0) < 0)
2152		return -errno;
2153	return st;
2154}
2155
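/*
 * minijail_wait: waits for the jail's init process to exit.
 * Returns -errno if waitpid(2) fails; the child's exit status on a normal
 * exit; MINIJAIL_ERR_JAIL if the child was killed by SIGSYS (a seccomp
 * filter violation); or 128 + signum for any other fatal signal, mirroring
 * bash(1).
 *
 * Typical caller-side usage (illustrative sketch only; error handling and
 * most jail configuration omitted):
 *
 *	struct minijail *j = minijail_new();
 *	minijail_namespace_pids(j);
 *	minijail_run(j, path, argv);
 *	int status = minijail_wait(j);
 *	minijail_destroy(j);
 */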
2156int API minijail_wait(struct minijail *j)
2157{
2158	int st;
2159	if (waitpid(j->initpid, &st, 0) < 0)
2160		return -errno;
2161
2162	if (!WIFEXITED(st)) {
2163		int error_status = st;
2164		if (WIFSIGNALED(st)) {
2165			int signum = WTERMSIG(st);
2166			warn("child process %d received signal %d",
2167			     j->initpid, signum);
2168			/*
2169			 * We return MINIJAIL_ERR_JAIL if the process received
2170			 * SIGSYS, which happens when a syscall is blocked by
2171			 * seccomp filters.
2172			 * If not, we do what bash(1) does:
2173			 * $? = 128 + signum
2174			 */
2175			if (signum == SIGSYS) {
2176				error_status = MINIJAIL_ERR_JAIL;
2177			} else {
2178				error_status = 128 + signum;
2179			}
2180		}
2181		return error_status;
2182	}
2183
2184	int exit_status = WEXITSTATUS(st);
2185	if (exit_status != 0)
2186		info("child process %d exited with status %d",
2187		     j->initpid, exit_status);
2188
2189	return exit_status;
2190}
2191
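/*
 * minijail_destroy: frees the jail and everything it owns (seccomp filter
 * program, mount list, owned strings, cgroup paths). It does not kill or
 * wait for the jailed process; use minijail_kill()/minijail_wait() first.
 */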
2192void API minijail_destroy(struct minijail *j)
2193{
2194	size_t i;
2195
2196	if (j->flags.seccomp_filter && j->filter_prog) {
2197		free(j->filter_prog->filter);
2198		free(j->filter_prog);
2199	}
2200	while (j->mounts_head) {
2201		struct mountpoint *m = j->mounts_head;
2202		j->mounts_head = j->mounts_head->next;
2203		free(m->data);
2204		free(m->type);
2205		free(m->dest);
2206		free(m->src);
2207		free(m);
2208	}
2209	j->mounts_tail = NULL;
2210	if (j->user)
2211		free(j->user);
2212	if (j->suppl_gid_list)
2213		free(j->suppl_gid_list);
2214	if (j->chrootdir)
2215		free(j->chrootdir);
2216	if (j->pid_file_path)
2217		free(j->pid_file_path);
2218	if (j->uidmap)
2219		free(j->uidmap);
2220	if (j->gidmap)
2221		free(j->gidmap);
2222	if (j->alt_syscall_table)
2223		free(j->alt_syscall_table);
2224	for (i = 0; i < j->cgroup_count; ++i)
2225		free(j->cgroups[i]);
2226	free(j);
2227}
2228