/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-amd64-linux.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2015 Nicholas Nethercote
      njn@valgrind.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_amd64_linux)

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuglog.h"
#include "pub_core_options.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"        /* for decls of generic wrappers */
#include "priv_syswrap-linux.h"          /* for decls of linux-ish wrappers */
#include "priv_syswrap-linux-variants.h" /* decls of linux variant wrappers */
#include "priv_syswrap-main.h"


/* ---------------------------------------------------------------------
   clone() handling
   ------------------------------------------------------------------ */

/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.  Also, clear all
   the integer registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1) ( Addr stack,
                                  Addr retaddr,
                                  void (*f)(Word),
                                  Word arg1 );
// %rdi == stack
// %rsi == retaddr
// %rdx == f
// %rcx == arg1
asm(
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movq   %rdi, %rsp\n"   // set stack
"   pushq  %rsi\n"         // retaddr to stack
"   pushq  %rdx\n"         // f to stack
"   pushq  %rcx\n"         // arg1 to stack
"   movq   $0, %rax\n"     // zero all GP regs
"   movq   $0, %rbx\n"
"   movq   $0, %rcx\n"
"   movq   $0, %rdx\n"
"   movq   $0, %rsi\n"
"   movq   $0, %rdi\n"
"   movq   $0, %rbp\n"
"   movq   $0, %r8\n"
"   movq   $0, %r9\n"
"   movq   $0, %r10\n"
"   movq   $0, %r11\n"
"   movq   $0, %r12\n"
"   movq   $0, %r13\n"
"   movq   $0, %r14\n"
"   movq   $0, %r15\n"
"   popq   %rdi\n"         // arg1 to correct arg reg
"   ret\n"                 // jump to f
"   ud2\n"                 // should never get here
".previous\n"
);
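
/* A sketch of the net effect of the asm above (illustrative only, not
   compiled): on entry to f the machine state is

      %rsp == stack - 8,  with 'retaddr' at 0(%rsp)
      %rdi == arg1
      all other integer registers == 0

   so f receives arg1 as its only argument and, should it ever return,
   would return to 'retaddr'.  The final 'ret' reaches f because f's
   address is what remains on top of the stack once arg1 has been
   popped. */
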
/*
   Perform a clone system call.  clone is strange because it has
   fork()-like return-twice semantics, so it needs special
   handling here.

   Upon entry, we have:

      int (*fn)(void*)   in %rdi
      void*  child_stack in %rsi
      int    flags       in %rdx
      void*  arg         in %rcx
      pid_t* child_tid   in %r8
      pid_t* parent_tid  in %r9
      void*  tls_ptr     at 8(%rsp)

   System call requires:

      int    $__NR_clone in %rax
      int    flags       in %rdi
      void*  child_stack in %rsi
      pid_t* parent_tid  in %rdx
      pid_t* child_tid   in %r10
      void*  tls_ptr     in %r8

   Returns a Long encoded in the linux-amd64 way, not a SysRes.
*/
#define __NR_CLONE        VG_STRINGIFY(__NR_clone)
#define __NR_EXIT         VG_STRINGIFY(__NR_exit)

extern
Long do_syscall_clone_amd64_linux ( Word (*fn)(void *),
                                    void* stack,
                                    Long  flags,
                                    void* arg,
                                    Long* child_tid,
                                    Long* parent_tid,
                                    vki_modify_ldt_t * );
asm(
".text\n"
".globl do_syscall_clone_amd64_linux\n"
"do_syscall_clone_amd64_linux:\n"
        // set up child stack, temporarily preserving fn and arg
"       subq    $16, %rsi\n"            // make space on stack
"       movq    %rcx, 8(%rsi)\n"        // save arg
"       movq    %rdi, 0(%rsi)\n"        // save fn

        // setup syscall
"       movq    $"__NR_CLONE", %rax\n"  // syscall number
"       movq    %rdx,     %rdi\n"       // syscall arg1: flags
        // %rsi already setup           // syscall arg2: child_stack
"       movq    %r9,      %rdx\n"       // syscall arg3: parent_tid
"       movq    %r8,      %r10\n"       // syscall arg4: child_tid
"       movq    8(%rsp),  %r8\n"        // syscall arg5: tls_ptr

"       syscall\n"                      // clone()

"       testq   %rax, %rax\n"           // child if retval == 0
"       jnz     1f\n"

        // CHILD - call thread function
"       pop     %rax\n"                 // pop fn
"       pop     %rdi\n"                 // pop fn arg1: arg
"       call    *%rax\n"                // call fn

        // exit with result
"       movq    %rax, %rdi\n"           // arg1: return value from fn
"       movq    $"__NR_EXIT", %rax\n"

"       syscall\n"

        // Exit returned?!
"       ud2\n"

"1:\n"  // PARENT or ERROR
"       ret\n"
".previous\n"
);

#undef __NR_CLONE
#undef __NR_EXIT


// forward declaration
static void setup_child ( ThreadArchState*, ThreadArchState* );

/*
   When a client clones, we need to keep track of the new thread.  This means:
   1. allocate a ThreadId+ThreadState+stack for the thread

   2. initialize the thread's new VCPU state

   3. create the thread using the same args as the client requested,
   but using the scheduler entrypoint for RIP, and a separate stack
   for RSP.
 */
static SysRes do_clone ( ThreadId ptid,
                         ULong flags, Addr rsp,
                         Long* parent_tidptr,
                         Long* child_tidptr,
                         Addr tlsaddr )
{
   static const Bool debug = False;

   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(ptid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   UWord*       stack;
   SysRes       res;
   Long         rax;
   vki_sigset_t blockall, savedmask;

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(ptid));
   vg_assert(VG_(is_valid_tid)(ctid));

   stack = (UWord*)ML_(allocstack)(ctid);
   if (stack == NULL) {
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto out;
   }

   /* Copy register state

      Both parent and child return to the same place, and the code
      following the clone syscall works out which is which, so we
      don't need to worry about it.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL rsp for the new thread, then
      it actually gets a copy of the parent's rsp.
   */
   setup_child( &ctst->arch, &ptst->arch );

   /* Make sys_clone appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_RAX = 0;

   if (rsp != 0)
      ctst->arch.vex.guest_RSP = rsp;

   ctst->os_state.parent = ptid;

   /* inherit signal mask */
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* Start the child with its threadgroup being the same as the
      parent's.  This is so that any exit_group calls that happen
      after the child is created but before it sets its
      os_state.threadgroup field for real (in thread_wrapper in
      syswrap-linux.c), really kill the new thread.  I.e. this avoids
      a race condition in which the thread is unkillable (via
      exit_group) because its threadgroup is not set.  The race window
      is probably only a few hundred or a few thousand cycles long.
      See #226116. */
   ctst->os_state.threadgroup = ptst->os_state.threadgroup;

   ML_(guess_and_register_stack) (rsp, ctst);

   /* Assume the clone will succeed, and tell any tool that wants to
      know that this thread has come into existence.  If the clone
      fails, we'll send out a ll_exit notification for it at the out:
      label below, to clean up. */
   vg_assert(VG_(owns_BigLock_LL)(ptid));
   VG_TRACK ( pre_thread_ll_create, ptid, ctid );

   if (flags & VKI_CLONE_SETTLS) {
      if (debug)
         VG_(printf)("clone child has SETTLS: tls at %#lx\n", tlsaddr);
      ctst->arch.vex.guest_FS_CONST = tlsaddr;
   }

   flags &= ~VKI_CLONE_SETTLS;

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Create the new thread */
   rax = do_syscall_clone_amd64_linux(
            ML_(start_thread_NORETURN), stack, flags, &VG_(threads)[ctid],
            child_tidptr, parent_tidptr, NULL
         );
   res = VG_(mk_SysRes_amd64_linux)( rax );

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

  out:
   if (sr_isError(res)) {
      /* clone failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   }

   return res;
}


/* ---------------------------------------------------------------------
   More thread stuff
   ------------------------------------------------------------------ */

void VG_(cleanup_thread) ( ThreadArchState *arch )
{
}

void setup_child ( /*OUT*/ ThreadArchState *child,
                   /*IN*/  ThreadArchState *parent )
{
   /* We inherit our parent's guest state. */
   child->vex = parent->vex;
   child->vex_shadow1 = parent->vex_shadow1;
   child->vex_shadow2 = parent->vex_shadow2;
}


/* ---------------------------------------------------------------------
   PRE/POST wrappers for AMD64/Linux-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(amd64_linux, name)
#define POST(name)      DEFN_POST_TEMPLATE(amd64_linux, name)

/* Add prototypes for the wrappers declared here, so that gcc doesn't
   harass us for not having prototypes.  Really this is a kludge --
   the right thing to do is to make these wrappers 'static' since they
   aren't visible outside this file, but that requires even more macro
   magic.
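
   (For reference, and roughly speaking: DECL_TEMPLATE(amd64_linux, sys_foo)
   declares a vgSysWrap_amd64_linux_sys_foo_before/_after pair of functions,
   and the PRE()/POST() macros above expand to the definitions of exactly
   those functions; see priv_types_n_macros.h for the real signatures.)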
 */
DECL_TEMPLATE(amd64_linux, sys_clone);
DECL_TEMPLATE(amd64_linux, sys_rt_sigreturn);
DECL_TEMPLATE(amd64_linux, sys_arch_prctl);
DECL_TEMPLATE(amd64_linux, sys_ptrace);
DECL_TEMPLATE(amd64_linux, sys_fadvise64);
DECL_TEMPLATE(amd64_linux, sys_mmap);
DECL_TEMPLATE(amd64_linux, sys_syscall184);


PRE(sys_clone)
{
   ULong cloneflags;

   PRINT("sys_clone ( %lx, %#lx, %#lx, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4,ARG5);
   PRE_REG_READ2(int, "clone",
                 unsigned long, flags,
                 void *, child_stack);

   if (ARG1 & VKI_CLONE_PARENT_SETTID) {
      if (VG_(tdict).track_pre_reg_read) {
         PRA3("clone", int *, parent_tidptr);
      }
      PRE_MEM_WRITE("clone(parent_tidptr)", ARG3, sizeof(Int));
      if (!VG_(am_is_valid_for_client)(ARG3, sizeof(Int), VKI_PROT_WRITE)) {
         SET_STATUS_Failure( VKI_EFAULT );
         return;
      }
   }
   if (ARG1 & VKI_CLONE_SETTLS) {
      /* On amd64 the tls argument (ARG5) is just the new %fs base
         value, not a pointer to a modify_ldt/user_desc structure as on
         x86, so there is nothing in memory to check here. */
      if (VG_(tdict).track_pre_reg_read) {
         PRA5("clone", unsigned long, tlsaddr);
      }
   }
   if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID)) {
      if (VG_(tdict).track_pre_reg_read) {
         PRA4("clone", int *, child_tidptr);
      }
      PRE_MEM_WRITE("clone(child_tidptr)", ARG4, sizeof(Int));
      if (!VG_(am_is_valid_for_client)(ARG4, sizeof(Int), VKI_PROT_WRITE)) {
         SET_STATUS_Failure( VKI_EFAULT );
         return;
      }
   }

   cloneflags = ARG1;

   if (!ML_(client_signal_OK)(ARG1 & VKI_CSIGNAL)) {
      SET_STATUS_Failure( VKI_EINVAL );
      return;
   }

   /* Only look at the flags we really care about */
   switch (cloneflags & (VKI_CLONE_VM | VKI_CLONE_FS
                         | VKI_CLONE_FILES | VKI_CLONE_VFORK)) {
      case VKI_CLONE_VM | VKI_CLONE_FS | VKI_CLONE_FILES:
         /* thread creation */
         SET_STATUS_from_SysRes(
            do_clone(tid,
                     ARG1,          /* flags */
                     (Addr)ARG2,    /* child RSP */
                     (Long *)ARG3,  /* parent_tidptr */
                     (Long *)ARG4,  /* child_tidptr */
                     (Addr)ARG5));  /* set_tls */
         break;

      case VKI_CLONE_VFORK | VKI_CLONE_VM: /* vfork */
         /* FALLTHROUGH - assume vfork == fork */
         cloneflags &= ~(VKI_CLONE_VFORK | VKI_CLONE_VM);

      case 0: /* plain fork */
         SET_STATUS_from_SysRes(
            ML_(do_fork_clone)(tid,
                               cloneflags,     /* flags */
                               (Int *)ARG3,    /* parent_tidptr */
                               (Int *)ARG4));  /* child_tidptr */
         break;

      default:
         /* should we just ENOSYS? */
         VG_(message)(Vg_UserMsg,
                      "Unsupported clone() flags: 0x%lx\n", ARG1);
         VG_(message)(Vg_UserMsg,
                      "\n");
         VG_(message)(Vg_UserMsg,
                      "The only supported clone() uses are:\n");
         VG_(message)(Vg_UserMsg,
                      " - via a threads library (LinuxThreads or NPTL)\n");
         VG_(message)(Vg_UserMsg,
                      " - via the implementation of fork or vfork\n");
         VG_(unimplemented)
            ("Valgrind does not support general clone().");
   }

   if (SUCCESS) {
      if (ARG1 & VKI_CLONE_PARENT_SETTID)
         POST_MEM_WRITE(ARG3, sizeof(Int));
      if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID))
         POST_MEM_WRITE(ARG4, sizeof(Int));

      /* Thread creation was successful; let the child have the chance
         to run */
      *flags |= SfYieldAfter;
   }
}
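
/* For reference (illustrative only): an NPTL pthread_create() typically
   asks for

      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD
      | CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID
      | CLONE_CHILD_CLEARTID

   (spelled VKI_CLONE_* here), which lands in the "thread creation" case
   above, whereas glibc's fork() boils down to a clone with just SIGCHLD
   in the flags, which lands in the "plain fork" case. */
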
PRE(sys_rt_sigreturn)
{
   /* This isn't really a syscall at all - it's a misuse of the
      syscall mechanism by m_sigframe.  VG_(sigframe_create) sets the
      return address of the signal frames it creates to be a short
      piece of code which does this "syscall".  The only purpose of
      the syscall is to call VG_(sigframe_destroy), which restores the
      thread's registers from the frame and then removes it.
      Consequently we must ask the syswrap driver logic not to write
      back the syscall "result" as that would overwrite the
      just-restored register state. */

   ThreadState* tst;
   PRINT("sys_rt_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust RSP to point to start of frame; skip back up over handler
      ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_RSP -= sizeof(Addr);

   /* This is only so that the RIP is (might be) useful to report if
      something goes wrong in the sigreturn.  JRS 20070318: no idea
      what this is for */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it, as
      described above */
   VG_(sigframe_destroy)(tid, True);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

PRE(sys_arch_prctl)
{
   ThreadState* tst;
   PRINT( "arch_prctl ( %ld, %lx )", SARG1, ARG2 );

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   // Nb: can't use "ARG2".."ARG5" here because that's our own macro...
   PRE_REG_READ2(long, "arch_prctl",
                 int, option, unsigned long, arg2);
   // XXX: totally wrong... we need to look at the 'option' arg, and do
   // PRE_MEM_READs/PRE_MEM_WRITEs as necessary...

   /* "do" the syscall ourselves; the kernel never sees it */
   if (ARG1 == VKI_ARCH_SET_FS) {
      tst = VG_(get_ThreadState)(tid);
      tst->arch.vex.guest_FS_CONST = ARG2;
   }
   else if (ARG1 == VKI_ARCH_GET_FS) {
      PRE_MEM_WRITE("arch_prctl(addr)", ARG2, sizeof(unsigned long));
      tst = VG_(get_ThreadState)(tid);
      *(unsigned long *)ARG2 = tst->arch.vex.guest_FS_CONST;
      POST_MEM_WRITE(ARG2, sizeof(unsigned long));
   }
   else if (ARG1 == VKI_ARCH_SET_GS) {
      tst = VG_(get_ThreadState)(tid);
      tst->arch.vex.guest_GS_CONST = ARG2;
   }
   else if (ARG1 == VKI_ARCH_GET_GS) {
      PRE_MEM_WRITE("arch_prctl(addr)", ARG2, sizeof(unsigned long));
      tst = VG_(get_ThreadState)(tid);
      *(unsigned long *)ARG2 = tst->arch.vex.guest_GS_CONST;
      POST_MEM_WRITE(ARG2, sizeof(unsigned long));
   }
   else {
      VG_(core_panic)("Unsupported arch_prctl option");
   }

   /* Note: the Status writeback to guest state that happens after
      this wrapper returns does not change guest_FS_CONST or guest_GS_CONST;
      hence that direct assignment to the guest state is safe here. */
   SET_STATUS_Success( 0 );
}

// Parts of this are amd64-specific, but the *PEEK* cases are generic.
//
// ARG3 is only used for pointers into the traced process's address
// space and for offsets into the traced process's struct
// user_regs_struct. It is never a pointer into this process's memory
// space, and we should therefore not check anything it points to.
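//
// Note also that for the PEEK* requests it is the raw syscall semantics
// that matter here: the kernel writes the peeked word through the 'data'
// pointer (ARG4) and returns 0, and it is glibc's ptrace() wrapper that
// turns this into a return value.  That is why the PEEK* cases below do a
// PRE_MEM_WRITE (and later a POST_MEM_WRITE) on ARG4.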
PRE(sys_ptrace)
{
   PRINT("sys_ptrace ( %ld, %ld, %#lx, %#lx )", SARG1, SARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "ptrace",
                 long, request, long, pid, long, addr, long, data);
   switch (ARG1) {
   case VKI_PTRACE_PEEKTEXT:
   case VKI_PTRACE_PEEKDATA:
   case VKI_PTRACE_PEEKUSR:
      PRE_MEM_WRITE( "ptrace(peek)", ARG4,
                     sizeof (long));
      break;
   case VKI_PTRACE_GETREGS:
      PRE_MEM_WRITE( "ptrace(getregs)", ARG4,
                     sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_GETFPREGS:
      PRE_MEM_WRITE( "ptrace(getfpregs)", ARG4,
                     sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_SETREGS:
      PRE_MEM_READ( "ptrace(setregs)", ARG4,
                    sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_SETFPREGS:
      PRE_MEM_READ( "ptrace(setfpregs)", ARG4,
                    sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_GETEVENTMSG:
      PRE_MEM_WRITE( "ptrace(geteventmsg)", ARG4, sizeof(unsigned long));
      break;
   case VKI_PTRACE_GETSIGINFO:
      PRE_MEM_WRITE( "ptrace(getsiginfo)", ARG4, sizeof(vki_siginfo_t));
      break;
   case VKI_PTRACE_SETSIGINFO:
      PRE_MEM_READ( "ptrace(setsiginfo)", ARG4, sizeof(vki_siginfo_t));
      break;
   case VKI_PTRACE_GETREGSET:
      ML_(linux_PRE_getregset)(tid, ARG3, ARG4);
      break;
   case VKI_PTRACE_SETREGSET:
      ML_(linux_PRE_setregset)(tid, ARG3, ARG4);
      break;
   default:
      break;
   }
}

POST(sys_ptrace)
{
   switch (ARG1) {
   case VKI_PTRACE_PEEKTEXT:
   case VKI_PTRACE_PEEKDATA:
   case VKI_PTRACE_PEEKUSR:
      POST_MEM_WRITE( ARG4, sizeof (long));
      break;
   case VKI_PTRACE_GETREGS:
      POST_MEM_WRITE( ARG4, sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_GETFPREGS:
      POST_MEM_WRITE( ARG4, sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_GETEVENTMSG:
      POST_MEM_WRITE( ARG4, sizeof(unsigned long));
      break;
   case VKI_PTRACE_GETSIGINFO:
      /* XXX: This is a simplification. Different parts of the
       * siginfo_t are valid depending on the type of signal.
       */
      POST_MEM_WRITE( ARG4, sizeof(vki_siginfo_t));
      break;
   case VKI_PTRACE_GETREGSET:
      ML_(linux_POST_getregset)(tid, ARG3, ARG4);
      break;
   default:
      break;
   }
}

PRE(sys_fadvise64)
{
   PRINT("sys_fadvise64 ( %ld, %ld, %lu, %ld )", SARG1, SARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "fadvise64",
                 int, fd, vki_loff_t, offset, vki_size_t, len, int, advice);
}

PRE(sys_mmap)
{
   SysRes r;

   PRINT("sys_mmap ( %#lx, %lu, %ld, %ld, %ld, %ld )",
         ARG1, ARG2, SARG3, SARG4, SARG5, SARG6 );
   PRE_REG_READ6(long, "mmap",
                 unsigned long, start, unsigned long, length,
                 int, prot, int, flags, int, fd, vki_off_t, offset);

   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6 );
   SET_STATUS_from_SysRes(r);
}
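
/* Note that there is no __NR_mmap2 on amd64-linux, so the 'offset'
   argument above is already in bytes.  ML_(generic_PRE_sys_mmap) does the
   real work, including telling the address-space manager and any tool
   about the new mapping, and its SysRes becomes this syscall's result. */
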

/* ---------------------------------------------------------------
   PRE/POST wrappers for AMD64/Linux-variant specific syscalls
   ------------------------------------------------------------ */

PRE(sys_syscall184)
{
   Int err;

   /* 184 is used by sys_bproc.  If we're not on a declared bproc
      variant, fail in the usual way, since it is otherwise unused. */

   if (!KernelVariantiS(KernelVariant_bproc, VG_(clo_kernel_variant))) {
      PRINT("non-existent syscall! (syscall 184)");
      PRE_REG_READ0(long, "ni_syscall(184)");
      SET_STATUS_Failure( VKI_ENOSYS );
      return;
   }

   err = ML_(linux_variant_PRE_sys_bproc)( ARG1, ARG2, ARG3,
                                           ARG4, ARG5, ARG6 );
   if (err) {
      SET_STATUS_Failure( err );
      return;
   }
   /* Let it go through. */
   *flags |= SfMayBlock; /* who knows?  play safe. */
}

POST(sys_syscall184)
{
   ML_(linux_variant_POST_sys_bproc)( ARG1, ARG2, ARG3,
                                      ARG4, ARG5, ARG6 );
}

#undef PRE
#undef POST


/* ---------------------------------------------------------------------
   The AMD64/Linux syscall table
   ------------------------------------------------------------------ */

/* Add an amd64-linux specific wrapper to a syscall table. */
#define PLAX_(const, name)    WRAPPER_ENTRY_X_(amd64_linux, const, name)
#define PLAXY(const, name)    WRAPPER_ENTRY_XY(amd64_linux, const, name)

// This table maps from __NR_xxx syscall numbers (from
// linux/include/asm-x86_64/unistd.h) to the appropriate PRE/POST sys_foo()
// wrappers on AMD64 (as per sys_call_table in
// linux/arch/x86_64/kernel/entry.S).
//
// When implementing these wrappers, you need to work out if the wrapper is
// generic, Linux-only (but arch-independent), or AMD64/Linux only.

static SyscallTableEntry syscall_table[] = {
   GENXY(__NR_read, sys_read),              // 0
   GENX_(__NR_write, sys_write),            // 1
   GENXY(__NR_open, sys_open),              // 2
   GENXY(__NR_close, sys_close),            // 3
   GENXY(__NR_stat, sys_newstat),           // 4

   GENXY(__NR_fstat, sys_newfstat),         // 5
   GENXY(__NR_lstat, sys_newlstat),         // 6
   GENXY(__NR_poll, sys_poll),              // 7
   LINX_(__NR_lseek, sys_lseek),            // 8
   PLAX_(__NR_mmap, sys_mmap),              // 9

   GENXY(__NR_mprotect, sys_mprotect),      // 10
   GENXY(__NR_munmap, sys_munmap),          // 11
   GENX_(__NR_brk, sys_brk),                // 12
   LINXY(__NR_rt_sigaction, sys_rt_sigaction),     // 13
   LINXY(__NR_rt_sigprocmask, sys_rt_sigprocmask), // 14

   PLAX_(__NR_rt_sigreturn, sys_rt_sigreturn),     // 15
   LINXY(__NR_ioctl, sys_ioctl),            // 16
   GENXY(__NR_pread64, sys_pread64),        // 17
   GENX_(__NR_pwrite64, sys_pwrite64),      // 18
   GENXY(__NR_readv, sys_readv),            // 19

   GENX_(__NR_writev, sys_writev),          // 20
   GENX_(__NR_access, sys_access),          // 21
   LINXY(__NR_pipe, sys_pipe),              // 22
   GENX_(__NR_select, sys_select),          // 23
   LINX_(__NR_sched_yield, sys_sched_yield),       // 24

   GENX_(__NR_mremap, sys_mremap),          // 25
   GENX_(__NR_msync, sys_msync),            // 26
   GENXY(__NR_mincore, sys_mincore),        // 27
   GENX_(__NR_madvise, sys_madvise),        // 28
   LINX_(__NR_shmget, sys_shmget),          // 29

   LINXY(__NR_shmat, wrap_sys_shmat),       // 30
   LINXY(__NR_shmctl, sys_shmctl),          // 31
   GENXY(__NR_dup, sys_dup),                // 32
   GENXY(__NR_dup2, sys_dup2),              // 33
   GENX_(__NR_pause, sys_pause),            // 34

   GENXY(__NR_nanosleep, sys_nanosleep),    // 35
   GENXY(__NR_getitimer, sys_getitimer),    // 36
   GENX_(__NR_alarm, sys_alarm),            // 37
   GENXY(__NR_setitimer, sys_setitimer),    // 38
   GENX_(__NR_getpid, sys_getpid),          // 39

   LINXY(__NR_sendfile, sys_sendfile),      // 40
   LINXY(__NR_socket, sys_socket),          // 41
   LINX_(__NR_connect, sys_connect),        // 42
   LINXY(__NR_accept, sys_accept),          // 43
   LINX_(__NR_sendto, sys_sendto),          // 44

   LINXY(__NR_recvfrom, sys_recvfrom),      // 45
   LINX_(__NR_sendmsg, sys_sendmsg),        // 46
   LINXY(__NR_recvmsg, sys_recvmsg),        // 47
   LINX_(__NR_shutdown, sys_shutdown),      // 48
   LINX_(__NR_bind, sys_bind),              // 49

   LINX_(__NR_listen, sys_listen),          // 50
   LINXY(__NR_getsockname, sys_getsockname),       // 51
   LINXY(__NR_getpeername, sys_getpeername),       // 52
   LINXY(__NR_socketpair, sys_socketpair),  // 53
   LINX_(__NR_setsockopt, sys_setsockopt),  // 54

   LINXY(__NR_getsockopt, sys_getsockopt),  // 55
   PLAX_(__NR_clone, sys_clone),            // 56
   GENX_(__NR_fork, sys_fork),              // 57
   GENX_(__NR_vfork, sys_fork),             // 58 treat as fork
   GENX_(__NR_execve, sys_execve),          // 59

   GENX_(__NR_exit, sys_exit),              // 60
   GENXY(__NR_wait4, sys_wait4),            // 61
   GENX_(__NR_kill, sys_kill),              // 62
   GENXY(__NR_uname, sys_newuname),         // 63
   LINX_(__NR_semget, sys_semget),          // 64

   LINX_(__NR_semop, sys_semop),            // 65
   LINXY(__NR_semctl, sys_semctl),          // 66
   LINXY(__NR_shmdt, sys_shmdt),            // 67
   LINX_(__NR_msgget, sys_msgget),          // 68
   LINX_(__NR_msgsnd, sys_msgsnd),          // 69

   LINXY(__NR_msgrcv, sys_msgrcv),          // 70
   LINXY(__NR_msgctl, sys_msgctl),          // 71
   LINXY(__NR_fcntl, sys_fcntl),            // 72
   GENX_(__NR_flock, sys_flock),            // 73
   GENX_(__NR_fsync, sys_fsync),            // 74

   GENX_(__NR_fdatasync, sys_fdatasync),    // 75
   GENX_(__NR_truncate, sys_truncate),      // 76
   GENX_(__NR_ftruncate, sys_ftruncate),    // 77
   GENXY(__NR_getdents, sys_getdents),      // 78
   GENXY(__NR_getcwd, sys_getcwd),          // 79

   GENX_(__NR_chdir, sys_chdir),            // 80
   GENX_(__NR_fchdir, sys_fchdir),          // 81
   GENX_(__NR_rename, sys_rename),          // 82
   GENX_(__NR_mkdir, sys_mkdir),            // 83
   GENX_(__NR_rmdir, sys_rmdir),            // 84

   GENXY(__NR_creat, sys_creat),            // 85
   GENX_(__NR_link, sys_link),              // 86
   GENX_(__NR_unlink, sys_unlink),          // 87
   GENX_(__NR_symlink, sys_symlink),        // 88
   GENX_(__NR_readlink, sys_readlink),      // 89

   GENX_(__NR_chmod, sys_chmod),            // 90
   GENX_(__NR_fchmod, sys_fchmod),          // 91
   GENX_(__NR_chown, sys_chown),            // 92
   GENX_(__NR_fchown, sys_fchown),          // 93
   GENX_(__NR_lchown, sys_lchown),          // 94

   GENX_(__NR_umask, sys_umask),            // 95
   GENXY(__NR_gettimeofday, sys_gettimeofday),     // 96
   GENXY(__NR_getrlimit, sys_getrlimit),    // 97
   GENXY(__NR_getrusage, sys_getrusage),    // 98
   LINXY(__NR_sysinfo, sys_sysinfo),        // 99

   GENXY(__NR_times, sys_times),            // 100
   PLAXY(__NR_ptrace, sys_ptrace),          // 101
   GENX_(__NR_getuid, sys_getuid),          // 102
   LINXY(__NR_syslog, sys_syslog),          // 103
   GENX_(__NR_getgid, sys_getgid),          // 104

   GENX_(__NR_setuid, sys_setuid),          // 105
   GENX_(__NR_setgid, sys_setgid),          // 106
   GENX_(__NR_geteuid, sys_geteuid),        // 107
   GENX_(__NR_getegid, sys_getegid),        // 108
   GENX_(__NR_setpgid, sys_setpgid),        // 109

   GENX_(__NR_getppid, sys_getppid),        // 110
   GENX_(__NR_getpgrp, sys_getpgrp),        // 111
   GENX_(__NR_setsid, sys_setsid),          // 112
   GENX_(__NR_setreuid, sys_setreuid),      // 113
   GENX_(__NR_setregid, sys_setregid),      // 114

   GENXY(__NR_getgroups, sys_getgroups),    // 115
   GENX_(__NR_setgroups, sys_setgroups),    // 116
   LINX_(__NR_setresuid, sys_setresuid),    // 117
   LINXY(__NR_getresuid, sys_getresuid),    // 118
   LINX_(__NR_setresgid, sys_setresgid),    // 119

   LINXY(__NR_getresgid, sys_getresgid),    // 120
   GENX_(__NR_getpgid, sys_getpgid),        // 121
   LINX_(__NR_setfsuid, sys_setfsuid),      // 122
   LINX_(__NR_setfsgid, sys_setfsgid),      // 123
   GENX_(__NR_getsid, sys_getsid),          // 124

   LINXY(__NR_capget, sys_capget),          // 125
   LINX_(__NR_capset, sys_capset),          // 126
   LINXY(__NR_rt_sigpending, sys_rt_sigpending),     // 127
   LINXY(__NR_rt_sigtimedwait, sys_rt_sigtimedwait), // 128
   LINXY(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo), // 129

   LINX_(__NR_rt_sigsuspend, sys_rt_sigsuspend),     // 130
   GENXY(__NR_sigaltstack, sys_sigaltstack),         // 131
   LINX_(__NR_utime, sys_utime),            // 132
   GENX_(__NR_mknod, sys_mknod),            // 133
   //   (__NR_uselib, sys_uselib),          // 134

   LINX_(__NR_personality, sys_personality),         // 135
   //   (__NR_ustat, sys_ustat),            // 136
   GENXY(__NR_statfs, sys_statfs),          // 137
   GENXY(__NR_fstatfs, sys_fstatfs),        // 138
   //   (__NR_sysfs, sys_sysfs),            // 139

   GENX_(__NR_getpriority, sys_getpriority),         // 140
   GENX_(__NR_setpriority, sys_setpriority),         // 141
   LINXY(__NR_sched_setparam, sys_sched_setparam),   // 142
   LINXY(__NR_sched_getparam, sys_sched_getparam),   // 143
   LINX_(__NR_sched_setscheduler, sys_sched_setscheduler),         // 144

   LINX_(__NR_sched_getscheduler, sys_sched_getscheduler),         // 145
   LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max), // 146
   LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min), // 147
   LINXY(__NR_sched_rr_get_interval, sys_sched_rr_get_interval),   // 148
   GENX_(__NR_mlock, sys_mlock),            // 149

   GENX_(__NR_munlock, sys_munlock),        // 150
   GENX_(__NR_mlockall, sys_mlockall),      // 151
   LINX_(__NR_munlockall, sys_munlockall),  // 152
   LINX_(__NR_vhangup, sys_vhangup),        // 153
   //   (__NR_modify_ldt, sys_modify_ldt),  // 154

   LINX_(__NR_pivot_root, sys_pivot_root),  // 155
   LINXY(__NR__sysctl, sys_sysctl),         // 156
   LINXY(__NR_prctl, sys_prctl),            // 157
   PLAX_(__NR_arch_prctl, sys_arch_prctl),  // 158
   LINXY(__NR_adjtimex, sys_adjtimex),      // 159

   GENX_(__NR_setrlimit, sys_setrlimit),    // 160
   GENX_(__NR_chroot, sys_chroot),          // 161
   GENX_(__NR_sync, sys_sync),              // 162
   //   (__NR_acct, sys_acct),              // 163
   GENX_(__NR_settimeofday, sys_settimeofday),       // 164

   LINX_(__NR_mount, sys_mount),            // 165
   LINX_(__NR_umount2, sys_umount),         // 166
   //   (__NR_swapon, sys_swapon),          // 167
   //   (__NR_swapoff, sys_swapoff),        // 168
   //   (__NR_reboot, sys_reboot),          // 169

   GENX_(__NR_sethostname, sys_sethostname),         // 170
   //   (__NR_setdomainname, sys_setdomainname),     // 171
   GENX_(__NR_iopl, sys_iopl),              // 172
   LINX_(__NR_ioperm, sys_ioperm),          // 173
   GENX_(__NR_create_module, sys_ni_syscall),        // 174

   LINX_(__NR_init_module, sys_init_module),         // 175
   LINX_(__NR_delete_module, sys_delete_module),     // 176
   //   (__NR_get_kernel_syms, sys_ni_syscall),      // 177
   //   (__NR_query_module, sys_ni_syscall),         // 178
   LINX_(__NR_quotactl, sys_quotactl),      // 179

   //   (__NR_nfsservctl, sys_nfsservctl),  // 180
   //   (__NR_getpmsg, sys_ni_syscall),     // 181
   //   (__NR_putpmsg, sys_ni_syscall),     // 182
   //   (__NR_afs_syscall, sys_ni_syscall), // 183
   PLAXY(184, sys_syscall184),              // 184 // sys_bproc?

   //   (__NR_security, sys_ni_syscall),    // 185
   LINX_(__NR_gettid, sys_gettid),          // 186
   LINX_(__NR_readahead, sys_readahead),    // 187
   LINX_(__NR_setxattr, sys_setxattr),      // 188
   LINX_(__NR_lsetxattr, sys_lsetxattr),    // 189

   LINX_(__NR_fsetxattr, sys_fsetxattr),    // 190
   LINXY(__NR_getxattr, sys_getxattr),      // 191
   LINXY(__NR_lgetxattr, sys_lgetxattr),    // 192
   LINXY(__NR_fgetxattr, sys_fgetxattr),    // 193
   LINXY(__NR_listxattr, sys_listxattr),    // 194

   LINXY(__NR_llistxattr, sys_llistxattr),  // 195
   LINXY(__NR_flistxattr, sys_flistxattr),  // 196
   LINX_(__NR_removexattr, sys_removexattr),         // 197
   LINX_(__NR_lremovexattr, sys_lremovexattr),       // 198
   LINX_(__NR_fremovexattr, sys_fremovexattr),       // 199

   LINXY(__NR_tkill, sys_tkill),            // 200
   GENXY(__NR_time, sys_time), /*was sys_time64*/    // 201
   LINXY(__NR_futex, sys_futex),            // 202
   LINX_(__NR_sched_setaffinity, sys_sched_setaffinity),  // 203
   LINXY(__NR_sched_getaffinity, sys_sched_getaffinity),  // 204

   //   (__NR_set_thread_area, sys_ni_syscall),      // 205
   LINXY(__NR_io_setup, sys_io_setup),      // 206
   LINX_(__NR_io_destroy, sys_io_destroy),  // 207
   LINXY(__NR_io_getevents, sys_io_getevents),       // 208
   LINX_(__NR_io_submit, sys_io_submit),    // 209

   LINXY(__NR_io_cancel, sys_io_cancel),    // 210
   //   (__NR_get_thread_area, sys_ni_syscall),      // 211
   LINXY(__NR_lookup_dcookie, sys_lookup_dcookie),   // 212
   LINXY(__NR_epoll_create, sys_epoll_create),       // 213
   //   (__NR_epoll_ctl_old, sys_ni_syscall),        // 214

   //   (__NR_epoll_wait_old, sys_ni_syscall),       // 215
   //   (__NR_remap_file_pages, sys_remap_file_pages) // 216
   GENXY(__NR_getdents64, sys_getdents64),  // 217
   LINX_(__NR_set_tid_address, sys_set_tid_address), // 218
   //   (__NR_restart_syscall, sys_restart_syscall), // 219

   LINX_(__NR_semtimedop, sys_semtimedop),  // 220
   PLAX_(__NR_fadvise64, sys_fadvise64),    // 221
   LINXY(__NR_timer_create, sys_timer_create),       // 222
   LINXY(__NR_timer_settime, sys_timer_settime),     // 223
   LINXY(__NR_timer_gettime, sys_timer_gettime),     // 224

   LINX_(__NR_timer_getoverrun, sys_timer_getoverrun),     // 225
   LINX_(__NR_timer_delete, sys_timer_delete),       // 226
   LINX_(__NR_clock_settime, sys_clock_settime),     // 227
   LINXY(__NR_clock_gettime, sys_clock_gettime),     // 228
   LINXY(__NR_clock_getres, sys_clock_getres),       // 229

   LINXY(__NR_clock_nanosleep, sys_clock_nanosleep), // 230
   LINX_(__NR_exit_group, sys_exit_group),  // 231
   LINXY(__NR_epoll_wait, sys_epoll_wait),  // 232
   LINX_(__NR_epoll_ctl, sys_epoll_ctl),    // 233
   LINXY(__NR_tgkill, sys_tgkill),          // 234

   GENX_(__NR_utimes, sys_utimes),          // 235
   //   (__NR_vserver, sys_ni_syscall),     // 236
   LINX_(__NR_mbind, sys_mbind),            // 237
   LINX_(__NR_set_mempolicy, sys_set_mempolicy),     // 238
   LINXY(__NR_get_mempolicy, sys_get_mempolicy),     // 239

   LINXY(__NR_mq_open, sys_mq_open),        // 240
   LINX_(__NR_mq_unlink, sys_mq_unlink),    // 241
   LINX_(__NR_mq_timedsend, sys_mq_timedsend),       // 242
   LINXY(__NR_mq_timedreceive, sys_mq_timedreceive), // 243
   LINX_(__NR_mq_notify, sys_mq_notify),    // 244

   LINXY(__NR_mq_getsetattr, sys_mq_getsetattr),     // 245
   //   (__NR_kexec_load, sys_ni_syscall),  // 246
   LINXY(__NR_waitid, sys_waitid),          // 247
   LINX_(__NR_add_key, sys_add_key),        // 248
   LINX_(__NR_request_key, sys_request_key),         // 249

   LINXY(__NR_keyctl, sys_keyctl),          // 250
   LINX_(__NR_ioprio_set, sys_ioprio_set),  // 251
   LINX_(__NR_ioprio_get, sys_ioprio_get),  // 252
   LINX_(__NR_inotify_init, sys_inotify_init),       // 253
   LINX_(__NR_inotify_add_watch, sys_inotify_add_watch),   // 254

   LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch),     // 255
//   LINX_(__NR_migrate_pages, sys_migrate_pages),   // 256
   LINXY(__NR_openat, sys_openat),          // 257
   LINX_(__NR_mkdirat, sys_mkdirat),        // 258
   LINX_(__NR_mknodat, sys_mknodat),        // 259

   LINX_(__NR_fchownat, sys_fchownat),      // 260
   LINX_(__NR_futimesat, sys_futimesat),    // 261
   LINXY(__NR_newfstatat, sys_newfstatat),  // 262
   LINX_(__NR_unlinkat, sys_unlinkat),      // 263
   LINX_(__NR_renameat, sys_renameat),      // 264

   LINX_(__NR_linkat, sys_linkat),          // 265
   LINX_(__NR_symlinkat, sys_symlinkat),    // 266
   LINX_(__NR_readlinkat, sys_readlinkat),  // 267
   LINX_(__NR_fchmodat, sys_fchmodat),      // 268
   LINX_(__NR_faccessat, sys_faccessat),    // 269

   LINX_(__NR_pselect6, sys_pselect6),      // 270
   LINXY(__NR_ppoll, sys_ppoll),            // 271
   LINX_(__NR_unshare, sys_unshare),        // 272
   LINX_(__NR_set_robust_list, sys_set_robust_list), // 273
   LINXY(__NR_get_robust_list, sys_get_robust_list), // 274

   LINX_(__NR_splice, sys_splice),          // 275
   LINX_(__NR_tee, sys_tee),                // 276
   LINX_(__NR_sync_file_range, sys_sync_file_range), // 277
   LINXY(__NR_vmsplice, sys_vmsplice),      // 278
   LINXY(__NR_move_pages, sys_move_pages),  // 279

   LINX_(__NR_utimensat, sys_utimensat),    // 280
   LINXY(__NR_epoll_pwait, sys_epoll_pwait),         // 281
   LINXY(__NR_signalfd, sys_signalfd),      // 282
   LINXY(__NR_timerfd_create, sys_timerfd_create),   // 283
   LINXY(__NR_eventfd, sys_eventfd),        // 284

   LINX_(__NR_fallocate, sys_fallocate),    // 285
   LINXY(__NR_timerfd_settime, sys_timerfd_settime), // 286
   LINXY(__NR_timerfd_gettime, sys_timerfd_gettime), // 287
   LINXY(__NR_accept4, sys_accept4),        // 288
   LINXY(__NR_signalfd4, sys_signalfd4),    // 289

   LINXY(__NR_eventfd2, sys_eventfd2),      // 290
   LINXY(__NR_epoll_create1, sys_epoll_create1),     // 291
   LINXY(__NR_dup3, sys_dup3),              // 292
   LINXY(__NR_pipe2, sys_pipe2),            // 293
   LINXY(__NR_inotify_init1, sys_inotify_init1),     // 294

   LINXY(__NR_preadv, sys_preadv),          // 295
   LINX_(__NR_pwritev, sys_pwritev),        // 296
   LINXY(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo),   // 297
   LINXY(__NR_perf_event_open, sys_perf_event_open), // 298
   LINXY(__NR_recvmmsg, sys_recvmmsg),      // 299

   LINXY(__NR_fanotify_init, sys_fanotify_init),     // 300
   LINX_(__NR_fanotify_mark, sys_fanotify_mark),     // 301
   LINXY(__NR_prlimit64, sys_prlimit64),    // 302
   LINXY(__NR_name_to_handle_at, sys_name_to_handle_at),   // 303
   LINXY(__NR_open_by_handle_at, sys_open_by_handle_at),   // 304

   LINXY(__NR_clock_adjtime, sys_clock_adjtime),     // 305
   LINX_(__NR_syncfs, sys_syncfs),          // 306
   LINXY(__NR_sendmmsg, sys_sendmmsg),      // 307
//   LINX_(__NR_setns, sys_ni_syscall),     // 308
   LINXY(__NR_getcpu, sys_getcpu),          // 309

   LINXY(__NR_process_vm_readv, sys_process_vm_readv),     // 310
   LINX_(__NR_process_vm_writev, sys_process_vm_writev),   // 311
   LINX_(__NR_kcmp, sys_kcmp),              // 312
//   LIN__(__NR_finit_module, sys_ni_syscall),        // 313
//   LIN__(__NR_sched_setattr, sys_ni_syscall),       // 314

//   LIN__(__NR_sched_getattr, sys_ni_syscall),       // 315
//   LIN__(__NR_renameat2, sys_ni_syscall),           // 316
//   LIN__(__NR_seccomp, sys_ni_syscall),             // 317
   LINXY(__NR_getrandom, sys_getrandom),    // 318
   LINXY(__NR_memfd_create, sys_memfd_create)        // 319

//   LIN__(__NR_kexec_file_load, sys_ni_syscall),     // 320
//   LIN__(__NR_bpf, sys_ni_syscall)                  // 321
};

SyscallTableEntry* ML_(get_linux_syscall_entry) ( UInt sysno )
{
   const UInt syscall_table_size
      = sizeof(syscall_table) / sizeof(syscall_table[0]);

   /* Is it in the contiguous initial section of the table? */
   if (sysno < syscall_table_size) {
      SyscallTableEntry* sys = &syscall_table[sysno];
      if (sys->before == NULL)
         return NULL; /* no entry */
      else
         return sys;
   }

   /* Can't find a wrapper */
   return NULL;
}

#endif // defined(VGP_amd64_linux)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/