sandbox_bpf.cc revision 010d83a9304c5a91596085d917d248abff47903a
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"

// Some headers on Android are missing cdefs: crbug.com/172337.
// (We can't use OS_ANDROID here since build_config.h is not included).
#if defined(ANDROID)
#include <sys/cdefs.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/posix/eintr_wrapper.h"
#include "sandbox/linux/seccomp-bpf/codegen.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_compatibility_policy.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
#include "sandbox/linux/seccomp-bpf/syscall.h"
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
#include "sandbox/linux/seccomp-bpf/verifier.h"

namespace sandbox {

namespace {

// Exit code that the sandboxed probe children use to report success; the
// parent in RunFunctionInPolicy() checks for exactly this value.
const int kExpectedExitCode = 100;

// Returns the number of bits set in "x". Used when compiling bit-test
// conditions into BPF instructions.
int popcount(uint32_t x) {
  return __builtin_popcount(x);
}

#if !defined(NDEBUG)
// Best-effort diagnostic for crbug.com/152530: writes a message to "out_fd"
// reporting that setting up the child's stderr failed, followed by
// strerror(errno). All errors from write() are deliberately ignored.
void WriteFailedStderrSetupMessage(int out_fd) {
  const char* error_string = strerror(errno);
  static const char msg[] =
      "You have reproduced a puzzling issue.\n"
      "Please, report to crbug.com/152530!\n"
      "Failed to set up stderr: ";
  if (HANDLE_EINTR(write(out_fd, msg, sizeof(msg) - 1)) > 0 && error_string &&
      HANDLE_EINTR(write(out_fd, error_string, strlen(error_string))) > 0 &&
      HANDLE_EINTR(write(out_fd, "\n", 1))) {
  }
}
#endif  // !defined(NDEBUG)

// We define a really simple sandbox policy. It is just good enough for us
// to tell that the sandbox has actually been activated.
60ErrorCode ProbeEvaluator(SandboxBPF*, int sysnum, void*) __attribute__((const)); 61ErrorCode ProbeEvaluator(SandboxBPF*, int sysnum, void*) { 62 switch (sysnum) { 63 case __NR_getpid: 64 // Return EPERM so that we can check that the filter actually ran. 65 return ErrorCode(EPERM); 66 case __NR_exit_group: 67 // Allow exit() with a non-default return code. 68 return ErrorCode(ErrorCode::ERR_ALLOWED); 69 default: 70 // Make everything else fail in an easily recognizable way. 71 return ErrorCode(EINVAL); 72 } 73} 74 75void ProbeProcess(void) { 76 if (syscall(__NR_getpid) < 0 && errno == EPERM) { 77 syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode)); 78 } 79} 80 81ErrorCode AllowAllEvaluator(SandboxBPF*, int sysnum, void*) { 82 if (!SandboxBPF::IsValidSyscallNumber(sysnum)) { 83 return ErrorCode(ENOSYS); 84 } 85 return ErrorCode(ErrorCode::ERR_ALLOWED); 86} 87 88void TryVsyscallProcess(void) { 89 time_t current_time; 90 // time() is implemented as a vsyscall. With an older glibc, with 91 // vsyscall=emulate and some versions of the seccomp BPF patch 92 // we may get SIGKILL-ed. Detect this! 93 if (time(¤t_time) != static_cast<time_t>(-1)) { 94 syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode)); 95 } 96} 97 98bool IsSingleThreaded(int proc_fd) { 99 if (proc_fd < 0) { 100 // Cannot determine whether program is single-threaded. Hope for 101 // the best... 
102 return true; 103 } 104 105 struct stat sb; 106 int task = -1; 107 if ((task = openat(proc_fd, "self/task", O_RDONLY | O_DIRECTORY)) < 0 || 108 fstat(task, &sb) != 0 || sb.st_nlink != 3 || IGNORE_EINTR(close(task))) { 109 if (task >= 0) { 110 if (IGNORE_EINTR(close(task))) { 111 } 112 } 113 return false; 114 } 115 return true; 116} 117 118bool IsDenied(const ErrorCode& code) { 119 return (code.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP || 120 (code.err() >= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MIN_ERRNO) && 121 code.err() <= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MAX_ERRNO)); 122} 123 124// Function that can be passed as a callback function to CodeGen::Traverse(). 125// Checks whether the "insn" returns an UnsafeTrap() ErrorCode. If so, it 126// sets the "bool" variable pointed to by "aux". 127void CheckForUnsafeErrorCodes(Instruction* insn, void* aux) { 128 bool* is_unsafe = static_cast<bool*>(aux); 129 if (!*is_unsafe) { 130 if (BPF_CLASS(insn->code) == BPF_RET && insn->k > SECCOMP_RET_TRAP && 131 insn->k - SECCOMP_RET_TRAP <= SECCOMP_RET_DATA) { 132 const ErrorCode& err = 133 Trap::ErrorCodeFromTrapId(insn->k & SECCOMP_RET_DATA); 134 if (err.error_type() != ErrorCode::ET_INVALID && !err.safe()) { 135 *is_unsafe = true; 136 } 137 } 138 } 139} 140 141// A Trap() handler that returns an "errno" value. The value is encoded 142// in the "aux" parameter. 143intptr_t ReturnErrno(const struct arch_seccomp_data&, void* aux) { 144 // TrapFnc functions report error by following the native kernel convention 145 // of returning an exit code in the range of -1..-4096. They do not try to 146 // set errno themselves. The glibc wrapper that triggered the SIGSYS will 147 // ultimately do so for us. 148 int err = reinterpret_cast<intptr_t>(aux) & SECCOMP_RET_DATA; 149 return -err; 150} 151 152// Function that can be passed as a callback function to CodeGen::Traverse(). 153// Checks whether the "insn" returns an errno value from a BPF filter. 
If so, 154// it rewrites the instruction to instead call a Trap() handler that does 155// the same thing. "aux" is ignored. 156void RedirectToUserspace(Instruction* insn, void* aux) { 157 // When inside an UnsafeTrap() callback, we want to allow all system calls. 158 // This means, we must conditionally disable the sandbox -- and that's not 159 // something that kernel-side BPF filters can do, as they cannot inspect 160 // any state other than the syscall arguments. 161 // But if we redirect all error handlers to user-space, then we can easily 162 // make this decision. 163 // The performance penalty for this extra round-trip to user-space is not 164 // actually that bad, as we only ever pay it for denied system calls; and a 165 // typical program has very few of these. 166 SandboxBPF* sandbox = static_cast<SandboxBPF*>(aux); 167 if (BPF_CLASS(insn->code) == BPF_RET && 168 (insn->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) { 169 insn->k = sandbox->Trap(ReturnErrno, 170 reinterpret_cast<void*>(insn->k & SECCOMP_RET_DATA)).err(); 171 } 172} 173 174// This wraps an existing policy and changes its behavior to match the changes 175// made by RedirectToUserspace(). This is part of the framework that allows BPF 176// evaluation in userland. 177// TODO(markus): document the code inside better. 
class RedirectToUserSpacePolicyWrapper : public SandboxBPFPolicy {
 public:
  explicit RedirectToUserSpacePolicyWrapper(
      const SandboxBPFPolicy* wrapped_policy)
      : wrapped_policy_(wrapped_policy) {
    DCHECK(wrapped_policy_);
  }

  // Delegates to the wrapped policy, but converts any SECCOMP_RET_ERRNO
  // result into a Trap() on ReturnErrno carrying the same errno value, so
  // the verifier sees the same return codes as the rewritten filter.
  virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
                                    int system_call_number) const OVERRIDE {
    ErrorCode err =
        wrapped_policy_->EvaluateSyscall(sandbox_compiler, system_call_number);
    if ((err.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
      return sandbox_compiler->Trap(
          ReturnErrno, reinterpret_cast<void*>(err.err() & SECCOMP_RET_DATA));
    }
    return err;
  }

 private:
  // Not owned; must outlive this wrapper.
  const SandboxBPFPolicy* wrapped_policy_;
  DISALLOW_COPY_AND_ASSIGN(RedirectToUserSpacePolicyWrapper);
};

// Trap handler that terminates the process with the message passed in "aux".
intptr_t BPFFailure(const struct arch_seccomp_data&, void* aux) {
  SANDBOX_DIE(static_cast<char*>(aux));
}

}  // namespace

SandboxBPF::SandboxBPF()
    : quiet_(false),
      proc_fd_(-1),
      conds_(new Conds),
      sandbox_has_started_(false) {}

SandboxBPF::~SandboxBPF() {
  // It is generally unsafe to call any memory allocator operations or to even
  // call arbitrary destructors after having installed a new policy. We just
  // have no way to tell whether this policy would allow the system calls that
  // the constructors can trigger.
  // So, we normally destroy all of our complex state prior to starting the
  // sandbox. But this won't happen, if the Sandbox object was created and
  // never actually used to set up a sandbox. So, just in case, we are
  // destroying any remaining state.
  // The "if ()" statements are technically superfluous. But let's be explicit
  // that we really don't want to run any code, when we already destroyed
  // objects before setting up the sandbox.
  if (conds_) {
    delete conds_;
  }
}

// static. Thin wrapper over SyscallIterator's notion of validity.
bool SandboxBPF::IsValidSyscallNumber(int sysnum) {
  return SyscallIterator::IsValid(sysnum);
}

// Forks a child, installs the policy described by "syscall_evaluator"/"aux"
// in the child, runs "code_in_sandbox" there, and returns true iff the child
// exited with kExpectedExitCode.
bool SandboxBPF::RunFunctionInPolicy(void (*code_in_sandbox)(),
                                     EvaluateSyscall syscall_evaluator,
                                     void* aux) {
  // Block all signals before forking a child process. This prevents an
  // attacker from manipulating our test by sending us an unexpected signal.
  sigset_t old_mask, new_mask;
  if (sigfillset(&new_mask) || sigprocmask(SIG_BLOCK, &new_mask, &old_mask)) {
    SANDBOX_DIE("sigprocmask() failed");
  }
  int fds[2];
  if (pipe2(fds, O_NONBLOCK | O_CLOEXEC)) {
    SANDBOX_DIE("pipe() failed");
  }

  if (fds[0] <= 2 || fds[1] <= 2) {
    SANDBOX_DIE("Process started without standard file descriptors");
  }

  // This code is using fork() and should only ever run single-threaded.
  // Most of the code below is "async-signal-safe" and only minor changes
  // would be needed to support threads.
  DCHECK(IsSingleThreaded(proc_fd_));
  pid_t pid = fork();
  if (pid < 0) {
    // Die if we cannot fork(). We would probably fail a little later
    // anyway, as the machine is likely very close to running out of
    // memory.
    // But what we don't want to do is return "false", as a crafty
    // attacker might cause fork() to fail at will and could trick us
    // into running without a sandbox.
    sigprocmask(SIG_SETMASK, &old_mask, NULL);  // OK, if it fails
    SANDBOX_DIE("fork() failed unexpectedly");
  }

  // In the child process
  if (!pid) {
    // Test a very simple sandbox policy to verify that we can
    // successfully turn on sandboxing.
    Die::EnableSimpleExit();

    errno = 0;
    if (IGNORE_EINTR(close(fds[0]))) {
      // This call to close() has been failing in strange ways. See
      // crbug.com/152530. So we only fail in debug mode now.
#if !defined(NDEBUG)
      WriteFailedStderrSetupMessage(fds[1]);
      SANDBOX_DIE(NULL);
#endif
    }
    if (HANDLE_EINTR(dup2(fds[1], 2)) != 2) {
      // Stderr could very well be a file descriptor to .xsession-errors, or
      // another file, which could be backed by a file system that could cause
      // dup2 to fail while trying to close stderr. It's important that we do
      // not fail on trying to close stderr.
      // If dup2 fails here, we will continue normally, this means that our
      // parent won't cause a fatal failure if something writes to stderr in
      // this child.
#if !defined(NDEBUG)
      // In DEBUG builds, we still want to get a report.
      WriteFailedStderrSetupMessage(fds[1]);
      SANDBOX_DIE(NULL);
#endif
    }
    if (IGNORE_EINTR(close(fds[1]))) {
      // This call to close() has been failing in strange ways. See
      // crbug.com/152530. So we only fail in debug mode now.
#if !defined(NDEBUG)
      WriteFailedStderrSetupMessage(fds[1]);
      SANDBOX_DIE(NULL);
#endif
    }

    SetSandboxPolicyDeprecated(syscall_evaluator, aux);
    if (!StartSandbox(PROCESS_SINGLE_THREADED)) {
      SANDBOX_DIE(NULL);
    }

    // Run our code in the sandbox.
    code_in_sandbox();

    // code_in_sandbox() is not supposed to return here.
    SANDBOX_DIE(NULL);
  }

  // In the parent process.
  if (IGNORE_EINTR(close(fds[1]))) {
    SANDBOX_DIE("close() failed");
  }
  if (sigprocmask(SIG_SETMASK, &old_mask, NULL)) {
    SANDBOX_DIE("sigprocmask() failed");
  }
  int status;
  if (HANDLE_EINTR(waitpid(pid, &status, 0)) != pid) {
    SANDBOX_DIE("waitpid() failed unexpectedly");
  }
  bool rc = WIFEXITED(status) && WEXITSTATUS(status) == kExpectedExitCode;

  // If we fail to support sandboxing, there might be an additional
  // error message. If so, this was an entirely unexpected and fatal
  // failure. We should report the failure and somebody must fix
  // things.  This is probably a security-critical bug in the sandboxing
  // code.
  if (!rc) {
    char buf[4096];
    ssize_t len = HANDLE_EINTR(read(fds[0], buf, sizeof(buf) - 1));
    if (len > 0) {
      // Trim trailing newlines before dying with the child's message.
      while (len > 1 && buf[len - 1] == '\n') {
        --len;
      }
      buf[len] = '\000';
      SANDBOX_DIE(buf);
    }
  }
  if (IGNORE_EINTR(close(fds[0]))) {
    SANDBOX_DIE("close() failed");
  }

  return rc;
}

// Probes kernel support by running two sandboxed children: one that checks
// the filter actually executes, one that checks vsyscalls don't get us
// killed (see TryVsyscallProcess).
bool SandboxBPF::KernelSupportSeccompBPF() {
  return RunFunctionInPolicy(ProbeProcess, ProbeEvaluator, 0) &&
         RunFunctionInPolicy(TryVsyscallProcess, AllowAllEvaluator, 0);
}

SandboxBPF::SandboxStatus SandboxBPF::SupportsSeccompSandbox(int proc_fd) {
  // If the sandbox is currently active, we clearly must have support for
  // sandboxing.
  if (status_ == STATUS_ENABLED) {
    return status_;
  }

  // Even if the sandbox was previously available, something might have
  // changed in our run-time environment. Check one more time.
  if (status_ == STATUS_AVAILABLE) {
    if (!IsSingleThreaded(proc_fd)) {
      status_ = STATUS_UNAVAILABLE;
    }
    return status_;
  }

  if (status_ == STATUS_UNAVAILABLE && IsSingleThreaded(proc_fd)) {
    // All state transitions resulting in STATUS_UNAVAILABLE are immediately
    // preceded by STATUS_AVAILABLE. Furthermore, these transitions all
    // happen, if and only if they are triggered by the process being multi-
    // threaded.
    // In other words, if a single-threaded process is currently in the
    // STATUS_UNAVAILABLE state, it is safe to assume that sandboxing is
    // actually available.
    status_ = STATUS_AVAILABLE;
    return status_;
  }

  // If we have not previously checked for availability of the sandbox or if
  // we otherwise don't believe to have a good cached value, we have to
  // perform a thorough check now.
  if (status_ == STATUS_UNKNOWN) {
    // We create our own private copy of a "Sandbox" object.  This ensures that
This ensures that 393 // the object does not have any policies configured, that might interfere 394 // with the tests done by "KernelSupportSeccompBPF()". 395 SandboxBPF sandbox; 396 397 // By setting "quiet_ = true" we suppress messages for expected and benign 398 // failures (e.g. if the current kernel lacks support for BPF filters). 399 sandbox.quiet_ = true; 400 sandbox.set_proc_fd(proc_fd); 401 status_ = sandbox.KernelSupportSeccompBPF() ? STATUS_AVAILABLE 402 : STATUS_UNSUPPORTED; 403 404 // As we are performing our tests from a child process, the run-time 405 // environment that is visible to the sandbox is always guaranteed to be 406 // single-threaded. Let's check here whether the caller is single- 407 // threaded. Otherwise, we mark the sandbox as temporarily unavailable. 408 if (status_ == STATUS_AVAILABLE && !IsSingleThreaded(proc_fd)) { 409 status_ = STATUS_UNAVAILABLE; 410 } 411 } 412 return status_; 413} 414 415void SandboxBPF::set_proc_fd(int proc_fd) { proc_fd_ = proc_fd; } 416 417bool SandboxBPF::StartSandbox(SandboxThreadState thread_state) { 418 CHECK(thread_state == PROCESS_SINGLE_THREADED || 419 thread_state == PROCESS_MULTI_THREADED); 420 421 if (status_ == STATUS_UNSUPPORTED || status_ == STATUS_UNAVAILABLE) { 422 SANDBOX_DIE( 423 "Trying to start sandbox, even though it is known to be " 424 "unavailable"); 425 return false; 426 } else if (sandbox_has_started_ || !conds_) { 427 SANDBOX_DIE( 428 "Cannot repeatedly start sandbox. Create a separate Sandbox " 429 "object instead."); 430 return false; 431 } 432 if (proc_fd_ < 0) { 433 proc_fd_ = open("/proc", O_RDONLY | O_DIRECTORY); 434 } 435 if (proc_fd_ < 0) { 436 // For now, continue in degraded mode, if we can't access /proc. 437 // In the future, we might want to tighten this requirement. 
438 } 439 440 if (thread_state == PROCESS_SINGLE_THREADED && !IsSingleThreaded(proc_fd_)) { 441 SANDBOX_DIE("Cannot start sandbox, if process is already multi-threaded"); 442 return false; 443 } 444 445 // We no longer need access to any files in /proc. We want to do this 446 // before installing the filters, just in case that our policy denies 447 // close(). 448 if (proc_fd_ >= 0) { 449 if (IGNORE_EINTR(close(proc_fd_))) { 450 SANDBOX_DIE("Failed to close file descriptor for /proc"); 451 return false; 452 } 453 proc_fd_ = -1; 454 } 455 456 // Install the filters. 457 InstallFilter(thread_state); 458 459 // We are now inside the sandbox. 460 status_ = STATUS_ENABLED; 461 462 return true; 463} 464 465void SandboxBPF::PolicySanityChecks(SandboxBPFPolicy* policy) { 466 for (SyscallIterator iter(true); !iter.Done();) { 467 uint32_t sysnum = iter.Next(); 468 if (!IsDenied(policy->EvaluateSyscall(this, sysnum))) { 469 SANDBOX_DIE( 470 "Policies should deny system calls that are outside the " 471 "expected range (typically MIN_SYSCALL..MAX_SYSCALL)"); 472 } 473 } 474 return; 475} 476 477// Deprecated API, supported with a wrapper to the new API. 478void SandboxBPF::SetSandboxPolicyDeprecated(EvaluateSyscall syscall_evaluator, 479 void* aux) { 480 if (sandbox_has_started_ || !conds_) { 481 SANDBOX_DIE("Cannot change policy after sandbox has started"); 482 } 483 SetSandboxPolicy(new CompatibilityPolicy<void>(syscall_evaluator, aux)); 484} 485 486// Don't take a scoped_ptr here, polymorphism make their use awkward. 487void SandboxBPF::SetSandboxPolicy(SandboxBPFPolicy* policy) { 488 DCHECK(!policy_); 489 if (sandbox_has_started_ || !conds_) { 490 SANDBOX_DIE("Cannot change policy after sandbox has started"); 491 } 492 PolicySanityChecks(policy); 493 policy_.reset(policy); 494} 495 496void SandboxBPF::InstallFilter(SandboxThreadState thread_state) { 497 // We want to be very careful in not imposing any requirements on the 498 // policies that are set with SetSandboxPolicy(). 
This means, as soon as 499 // the sandbox is active, we shouldn't be relying on libraries that could 500 // be making system calls. This, for example, means we should avoid 501 // using the heap and we should avoid using STL functions. 502 // Temporarily copy the contents of the "program" vector into a 503 // stack-allocated array; and then explicitly destroy that object. 504 // This makes sure we don't ex- or implicitly call new/delete after we 505 // installed the BPF filter program in the kernel. Depending on the 506 // system memory allocator that is in effect, these operators can result 507 // in system calls to things like munmap() or brk(). 508 Program* program = AssembleFilter(false /* force_verification */); 509 510 struct sock_filter bpf[program->size()]; 511 const struct sock_fprog prog = {static_cast<unsigned short>(program->size()), 512 bpf}; 513 memcpy(bpf, &(*program)[0], sizeof(bpf)); 514 delete program; 515 516 // Make an attempt to release memory that is no longer needed here, rather 517 // than in the destructor. Try to avoid as much as possible to presume of 518 // what will be possible to do in the new (sandboxed) execution environment. 519 delete conds_; 520 conds_ = NULL; 521 policy_.reset(); 522 523 // Install BPF filter program 524 if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 525 SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to enable no-new-privs"); 526 } else { 527 if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) { 528 SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to turn on BPF filters"); 529 } 530 } 531 532 // TODO(rsesek): Always try to engage the sandbox with the 533 // PROCESS_MULTI_THREADED path first, and if that fails, assert that the 534 // process IsSingleThreaded() or SANDBOX_DIE. 535 536 if (thread_state == PROCESS_MULTI_THREADED) { 537 // TODO(rsesek): Move these to a more reasonable place once the kernel 538 // patch has landed upstream and these values are formalized. 
539 #define PR_SECCOMP_EXT 41 540 #define SECCOMP_EXT_ACT 1 541 #define SECCOMP_EXT_ACT_TSYNC 1 542 if (prctl(PR_SECCOMP_EXT, SECCOMP_EXT_ACT, SECCOMP_EXT_ACT_TSYNC, 0, 0)) { 543 SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to synchronize threadgroup " 544 "BPF filters."); 545 } 546 } 547 548 sandbox_has_started_ = true; 549} 550 551SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) { 552#if !defined(NDEBUG) 553 force_verification = true; 554#endif 555 556 // Verify that the user pushed a policy. 557 DCHECK(policy_); 558 559 // Assemble the BPF filter program. 560 CodeGen* gen = new CodeGen(); 561 if (!gen) { 562 SANDBOX_DIE("Out of memory"); 563 } 564 565 // If the architecture doesn't match SECCOMP_ARCH, disallow the 566 // system call. 567 Instruction* tail; 568 Instruction* head = gen->MakeInstruction( 569 BPF_LD + BPF_W + BPF_ABS, 570 SECCOMP_ARCH_IDX, 571 tail = gen->MakeInstruction( 572 BPF_JMP + BPF_JEQ + BPF_K, 573 SECCOMP_ARCH, 574 NULL, 575 gen->MakeInstruction( 576 BPF_RET + BPF_K, 577 Kill("Invalid audit architecture in BPF filter")))); 578 579 bool has_unsafe_traps = false; 580 { 581 // Evaluate all possible system calls and group their ErrorCodes into 582 // ranges of identical codes. 583 Ranges ranges; 584 FindRanges(&ranges); 585 586 // Compile the system call ranges to an optimized BPF jumptable 587 Instruction* jumptable = 588 AssembleJumpTable(gen, ranges.begin(), ranges.end()); 589 590 // If there is at least one UnsafeTrap() in our program, the entire sandbox 591 // is unsafe. We need to modify the program so that all non- 592 // SECCOMP_RET_ALLOW ErrorCodes are handled in user-space. This will then 593 // allow us to temporarily disable sandboxing rules inside of callbacks to 594 // UnsafeTrap(). 595 gen->Traverse(jumptable, CheckForUnsafeErrorCodes, &has_unsafe_traps); 596 597 // Grab the system call number, so that we can implement jump tables. 
    Instruction* load_nr =
        gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, SECCOMP_NR_IDX);

    // If our BPF program has unsafe jumps, enable support for them. This
    // test happens very early in the BPF filter program. Even before we
    // consider looking at system call numbers.
    // As support for unsafe jumps essentially defeats all the security
    // measures that the sandbox provides, we print a big warning message --
    // and of course, we make sure to only ever enable this feature if it
    // is actually requested by the sandbox policy.
    if (has_unsafe_traps) {
      if (SandboxSyscall(-1) == -1 && errno == ENOSYS) {
        SANDBOX_DIE(
            "Support for UnsafeTrap() has not yet been ported to this "
            "architecture");
      }

      // UnsafeTrap() relies on the SIGSYS handler being reachable, so the
      // policy must unconditionally allow the signal-related system calls.
      if (!policy_->EvaluateSyscall(this, __NR_rt_sigprocmask)
               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) ||
          !policy_->EvaluateSyscall(this, __NR_rt_sigreturn)
               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#if defined(__NR_sigprocmask)
          ||
          !policy_->EvaluateSyscall(this, __NR_sigprocmask)
               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#endif
#if defined(__NR_sigreturn)
          ||
          !policy_->EvaluateSyscall(this, __NR_sigreturn)
               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#endif
          ) {
        SANDBOX_DIE(
            "Invalid seccomp policy; if using UnsafeTrap(), you must "
            "unconditionally allow sigreturn() and sigprocmask()");
      }

      if (!Trap::EnableUnsafeTrapsInSigSysHandler()) {
        // We should never be able to get here, as UnsafeTrap() should never
        // actually return a valid ErrorCode object unless the user set the
        // CHROME_SANDBOX_DEBUGGING environment variable; and therefore,
        // "has_unsafe_traps" would always be false. But better double-check
        // than enabling dangerous code.
        SANDBOX_DIE("We'd rather die than enable unsafe traps");
      }
      gen->Traverse(jumptable, RedirectToUserspace, this);

      // Allow system calls, if they originate from our magic return address
      // (which we can query by calling SandboxSyscall(-1)).
      uintptr_t syscall_entry_point =
          static_cast<uintptr_t>(SandboxSyscall(-1));
      uint32_t low = static_cast<uint32_t>(syscall_entry_point);
#if __SIZEOF_POINTER__ > 4
      uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32);
#endif

      // BPF cannot do native 64bit comparisons. On 64bit architectures, we
      // have to compare both 32bit halves of the instruction pointer. If they
      // match what we expect, we return ERR_ALLOWED. If either or both don't
      // match, we continue evalutating the rest of the sandbox policy.
      Instruction* escape_hatch = gen->MakeInstruction(
          BPF_LD + BPF_W + BPF_ABS,
          SECCOMP_IP_LSB_IDX,
          gen->MakeInstruction(
              BPF_JMP + BPF_JEQ + BPF_K,
              low,
#if __SIZEOF_POINTER__ > 4
              gen->MakeInstruction(
                  BPF_LD + BPF_W + BPF_ABS,
                  SECCOMP_IP_MSB_IDX,
                  gen->MakeInstruction(
                      BPF_JMP + BPF_JEQ + BPF_K,
                      hi,
#endif
                      gen->MakeInstruction(BPF_RET + BPF_K,
                                           ErrorCode(ErrorCode::ERR_ALLOWED)),
#if __SIZEOF_POINTER__ > 4
                      load_nr)),
#endif
              load_nr));
      gen->JoinInstructions(tail, escape_hatch);
    } else {
      gen->JoinInstructions(tail, load_nr);
    }
    tail = load_nr;

// On Intel architectures, verify that system call numbers are in the
// expected number range.  The older i386 and x86-64 APIs clear bit 30
// on all system calls.  The newer x32 API always sets bit 30.
#if defined(__i386__) || defined(__x86_64__)
    Instruction* invalidX32 = gen->MakeInstruction(
        BPF_RET + BPF_K, Kill("Illegal mixing of system call ABIs").err_);
    Instruction* checkX32 =
#if defined(__x86_64__) && defined(__ILP32__)
        gen->MakeInstruction(
            BPF_JMP + BPF_JSET + BPF_K, 0x40000000, 0, invalidX32);
#else
        gen->MakeInstruction(
            BPF_JMP + BPF_JSET + BPF_K, 0x40000000, invalidX32, 0);
#endif
    gen->JoinInstructions(tail, checkX32);
    tail = checkX32;
#endif

    // Append jump table to our pre-amble
    gen->JoinInstructions(tail, jumptable);
  }

  // Turn the DAG into a vector of instructions.
  Program* program = new Program();
  gen->Compile(head, program);
  delete gen;

  // Make sure compilation resulted in BPF program that executes
  // correctly. Otherwise, there is an internal error in our BPF compiler.
  // There is really nothing the caller can do until the bug is fixed.
  if (force_verification) {
    // Verification is expensive. We only perform this step, if we are
    // compiled in debug mode, or if the caller explicitly requested
    // verification.
    VerifyProgram(*program, has_unsafe_traps);
  }

  return program;
}

void SandboxBPF::VerifyProgram(const Program& program, bool has_unsafe_traps) {
  // If we previously rewrote the BPF program so that it calls user-space
  // whenever we return an "errno" value from the filter, then we have to
  // wrap our system call evaluator to perform the same operation. Otherwise,
  // the verifier would also report a mismatch in return codes.
  scoped_ptr<const RedirectToUserSpacePolicyWrapper> redirected_policy(
      new RedirectToUserSpacePolicyWrapper(policy_.get()));

  const char* err = NULL;
  if (!Verifier::VerifyBPF(this,
                           program,
                           has_unsafe_traps ?
                               *redirected_policy : *policy_,
                           &err)) {
    CodeGen::PrintProgram(program);
    SANDBOX_DIE(err);
  }
}

// Walks every system call number and groups consecutive numbers that map to
// equal ErrorCodes into entries of "ranges".
void SandboxBPF::FindRanges(Ranges* ranges) {
  // Please note that "struct seccomp_data" defines system calls as a signed
  // int32_t, but BPF instructions always operate on unsigned quantities. We
  // deal with this disparity by enumerating from MIN_SYSCALL to MAX_SYSCALL,
  // and then verifying that the rest of the number range (both positive and
  // negative) all return the same ErrorCode.
  uint32_t old_sysnum = 0;
  ErrorCode old_err = policy_->EvaluateSyscall(this, old_sysnum);
  ErrorCode invalid_err = policy_->EvaluateSyscall(this, MIN_SYSCALL - 1);

  for (SyscallIterator iter(false); !iter.Done();) {
    uint32_t sysnum = iter.Next();
    ErrorCode err = policy_->EvaluateSyscall(this, static_cast<int>(sysnum));
    if (!iter.IsValid(sysnum) && !invalid_err.Equals(err)) {
      // A proper sandbox policy should always treat system calls outside of
      // the range MIN_SYSCALL..MAX_SYSCALL (i.e. anything that returns
      // "false" for SyscallIterator::IsValid()) identically. Typically, all
      // of these system calls would be denied with the same ErrorCode.
      SANDBOX_DIE("Invalid seccomp policy");
    }
    if (!err.Equals(old_err) || iter.Done()) {
      ranges->push_back(Range(old_sysnum, sysnum - 1, old_err));
      old_sysnum = sysnum;
      old_err = err;
    }
  }
}

Instruction* SandboxBPF::AssembleJumpTable(CodeGen* gen,
                                           Ranges::const_iterator start,
                                           Ranges::const_iterator stop) {
  // We convert the list of system call ranges into jump table that performs
  // a binary search over the ranges.
  // As a sanity check, we need to have at least one distinct ranges for us
  // to be able to build a jump table.
  if (stop - start <= 0) {
    SANDBOX_DIE("Invalid set of system call ranges");
  } else if (stop - start == 1) {
    // If we have narrowed things down to a single range object, we can
    // return from the BPF filter program.
    return RetExpression(gen, start->err);
  }

  // Pick the range object that is located at the mid point of our list.
  // We compare our system call number against the lowest valid system call
  // number in this range object. If our number is lower, it is outside of
  // this range object. If it is greater or equal, it might be inside.
  Ranges::const_iterator mid = start + (stop - start) / 2;

  // Sub-divide the list of ranges and continue recursively.
  Instruction* jf = AssembleJumpTable(gen, start, mid);
  Instruction* jt = AssembleJumpTable(gen, mid, stop);
  return gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K, mid->from, jt, jf);
}

// Emits either a plain BPF_RET for "err" or, for conditional ErrorCodes,
// the instruction sequence built by CondExpression().
Instruction* SandboxBPF::RetExpression(CodeGen* gen, const ErrorCode& err) {
  if (err.error_type_ == ErrorCode::ET_COND) {
    return CondExpression(gen, err);
  } else {
    return gen->MakeInstruction(BPF_RET + BPF_K, err);
  }
}

Instruction* SandboxBPF::CondExpression(CodeGen* gen, const ErrorCode& cond) {
  // We can only inspect the six system call arguments that are passed in
  // CPU registers.
  if (cond.argno_ < 0 || cond.argno_ >= 6) {
    SANDBOX_DIE(
        "Internal compiler error; invalid argument number "
        "encountered");
  }

  // BPF programs operate on 32bit entities. Load both halfs of the 64bit
  // system call argument and then generate suitable conditional statements.
  // Head/tail pairs for the instruction sequences that test the most- and
  // least-significant 32bit halves of the argument.
  Instruction* msb_head = gen->MakeInstruction(
      BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_MSB_IDX(cond.argno_));
  Instruction* msb_tail = msb_head;
  Instruction* lsb_head = gen->MakeInstruction(
      BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_LSB_IDX(cond.argno_));
  Instruction* lsb_tail = lsb_head;

  // Emit a suitable comparison statement.
  switch (cond.op_) {
    case ErrorCode::OP_EQUAL:
      // Compare the least significant bits for equality
      lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
                                      static_cast<uint32_t>(cond.value_),
                                      RetExpression(gen, *cond.passed_),
                                      RetExpression(gen, *cond.failed_));
      gen->JoinInstructions(lsb_head, lsb_tail);

      // If we are looking at a 64bit argument, we need to also compare the
      // most significant bits.
      if (cond.width_ == ErrorCode::TP_64BIT) {
        msb_tail =
            gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
                                 static_cast<uint32_t>(cond.value_ >> 32),
                                 lsb_head,
                                 RetExpression(gen, *cond.failed_));
        gen->JoinInstructions(msb_head, msb_tail);
      }
      break;
    case ErrorCode::OP_HAS_ALL_BITS:
      // Check the bits in the LSB half of the system call argument. Our
      // OP_HAS_ALL_BITS operator passes, iff all of the bits are set. This is
      // different from the kernel's BPF_JSET operation which passes, if any of
      // the bits are set.
      // Of course, if there is only a single set bit (or none at all), then
      // things get easier.
      {
        uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
        int lsb_bit_count = popcount(lsb_bits);
        if (lsb_bit_count == 0) {
          // No bits are set in the LSB half. The test will always pass.
          lsb_head = RetExpression(gen, *cond.passed_);
          lsb_tail = NULL;
        } else if (lsb_bit_count == 1) {
          // Exactly one bit is set in the LSB half. We can use the BPF_JSET
          // operator.
          lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
                                          lsb_bits,
                                          RetExpression(gen, *cond.passed_),
                                          RetExpression(gen, *cond.failed_));
          gen->JoinInstructions(lsb_head, lsb_tail);
        } else {
          // More than one bit is set in the LSB half. We need to combine
          // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
          // set in the system call argument.
          gen->JoinInstructions(
              lsb_head,
              gen->MakeInstruction(BPF_ALU + BPF_AND + BPF_K,
                                   lsb_bits,
                                   lsb_tail = gen->MakeInstruction(
                                       BPF_JMP + BPF_JEQ + BPF_K,
                                       lsb_bits,
                                       RetExpression(gen, *cond.passed_),
                                       RetExpression(gen, *cond.failed_))));
        }
      }

      // If we are looking at a 64bit argument, we need to also check the bits
      // in the MSB half of the system call argument.
      if (cond.width_ == ErrorCode::TP_64BIT) {
        uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
        int msb_bit_count = popcount(msb_bits);
        if (msb_bit_count == 0) {
          // No bits are set in the MSB half. The test will always pass.
          msb_head = lsb_head;
        } else if (msb_bit_count == 1) {
          // Exactly one bit is set in the MSB half. We can use the BPF_JSET
          // operator.
          msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
                                          msb_bits,
                                          lsb_head,
                                          RetExpression(gen, *cond.failed_));
          gen->JoinInstructions(msb_head, msb_tail);
        } else {
          // More than one bit is set in the MSB half. We need to combine
          // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
          // set in the system call argument.
          gen->JoinInstructions(
              msb_head,
              gen->MakeInstruction(
                  BPF_ALU + BPF_AND + BPF_K,
                  msb_bits,
                  gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
                                       msb_bits,
                                       lsb_head,
                                       RetExpression(gen, *cond.failed_))));
        }
      }
      break;
    case ErrorCode::OP_HAS_ANY_BITS:
      // Check the bits in the LSB half of the system call argument. Our
      // OP_HAS_ANY_BITS operator passes, iff any of the bits are set. This maps
      // nicely to the kernel's BPF_JSET operation.
      {
        uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
        if (!lsb_bits) {
          // No bits are set in the LSB half. The test will always fail.
          lsb_head = RetExpression(gen, *cond.failed_);
          lsb_tail = NULL;
        } else {
          lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
                                          lsb_bits,
                                          RetExpression(gen, *cond.passed_),
                                          RetExpression(gen, *cond.failed_));
          gen->JoinInstructions(lsb_head, lsb_tail);
        }
      }

      // If we are looking at a 64bit argument, we need to also check the bits
      // in the MSB half of the system call argument.
      if (cond.width_ == ErrorCode::TP_64BIT) {
        uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
        if (!msb_bits) {
          // No bits are set in the MSB half. The test will always fail.
          msb_head = lsb_head;
        } else {
          msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
                                          msb_bits,
                                          RetExpression(gen, *cond.passed_),
                                          lsb_head);
          gen->JoinInstructions(msb_head, msb_tail);
        }
      }
      break;
    default:
      // TODO(markus): Need to add support for OP_GREATER
      SANDBOX_DIE("Not implemented");
      break;
  }

  // Ensure that we never pass a 64bit value, when we only expect a 32bit
  // value. This is somewhat complicated by the fact that on 64bit systems,
  // callers could legitimately pass in a non-zero value in the MSB, iff the
  // LSB has been sign-extended into the MSB.
959 if (cond.width_ == ErrorCode::TP_32BIT) { 960 if (cond.value_ >> 32) { 961 SANDBOX_DIE( 962 "Invalid comparison of a 32bit system call argument " 963 "against a 64bit constant; this test is always false."); 964 } 965 966 Instruction* invalid_64bit = RetExpression(gen, Unexpected64bitArgument()); 967#if __SIZEOF_POINTER__ > 4 968 invalid_64bit = gen->MakeInstruction( 969 BPF_JMP + BPF_JEQ + BPF_K, 970 0xFFFFFFFF, 971 gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 972 SECCOMP_ARG_LSB_IDX(cond.argno_), 973 gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K, 974 0x80000000, 975 lsb_head, 976 invalid_64bit)), 977 invalid_64bit); 978#endif 979 gen->JoinInstructions( 980 msb_tail, 981 gen->MakeInstruction( 982 BPF_JMP + BPF_JEQ + BPF_K, 0, lsb_head, invalid_64bit)); 983 } 984 985 return msb_head; 986} 987 988ErrorCode SandboxBPF::Unexpected64bitArgument() { 989 return Kill("Unexpected 64bit argument detected"); 990} 991 992ErrorCode SandboxBPF::Trap(Trap::TrapFnc fnc, const void* aux) { 993 return Trap::MakeTrap(fnc, aux, true /* Safe Trap */); 994} 995 996ErrorCode SandboxBPF::UnsafeTrap(Trap::TrapFnc fnc, const void* aux) { 997 return Trap::MakeTrap(fnc, aux, false /* Unsafe Trap */); 998} 999 1000intptr_t SandboxBPF::ForwardSyscall(const struct arch_seccomp_data& args) { 1001 return SandboxSyscall(args.nr, 1002 static_cast<intptr_t>(args.args[0]), 1003 static_cast<intptr_t>(args.args[1]), 1004 static_cast<intptr_t>(args.args[2]), 1005 static_cast<intptr_t>(args.args[3]), 1006 static_cast<intptr_t>(args.args[4]), 1007 static_cast<intptr_t>(args.args[5])); 1008} 1009 1010ErrorCode SandboxBPF::Cond(int argno, 1011 ErrorCode::ArgType width, 1012 ErrorCode::Operation op, 1013 uint64_t value, 1014 const ErrorCode& passed, 1015 const ErrorCode& failed) { 1016 return ErrorCode(argno, 1017 width, 1018 op, 1019 value, 1020 &*conds_->insert(passed).first, 1021 &*conds_->insert(failed).first); 1022} 1023 1024ErrorCode SandboxBPF::Kill(const char* msg) { 1025 return 
Trap(BPFFailure, const_cast<char*>(msg)); 1026} 1027 1028SandboxBPF::SandboxStatus SandboxBPF::status_ = STATUS_UNKNOWN; 1029 1030} // namespace sandbox 1031