syswrap-x86-linux.c revision 5fc7da201dec005bec8d9a5e71581c2de8f9b5da

/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.        syswrap-x86-linux.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2008 Nicholas Nethercote
      njn@valgrind.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO/FIXME jrs 20050207: assignments to the syscall return result
   in interrupted_syscall() need to be reviewed.  They don't seem
   to assign the shadow state.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_mmap)
#include "pub_core_aspacemgr.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"     // For VG_(sigframe_destroy)()
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_stacks.h"       // VG_(register_stack)

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"        /* for decls of generic wrappers */
#include "priv_syswrap-linux.h"          /* for decls of linux-ish wrappers */
#include "priv_syswrap-linux-variants.h" /* decls of linux variant wrappers */
#include "priv_syswrap-main.h"


/* ---------------------------------------------------------------------
   clone() handling
   ------------------------------------------------------------------ */

/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.
   Also, clear all the integer registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1) ( Addr stack,
                                  Addr retaddr,
                                  void (*f)(Word),
                                  Word arg1 );
//  4(%esp) == stack
//  8(%esp) == retaddr
// 12(%esp) == f
// 16(%esp) == arg1
asm(
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movl %esp, %esi\n"     // remember old stack pointer
"   movl 4(%esi), %esp\n"  // set stack
"   pushl 16(%esi)\n"      // arg1 to stack
"   pushl 8(%esi)\n"       // retaddr to stack
"   pushl 12(%esi)\n"      // f to stack
"   movl $0, %eax\n"       // zero all GP regs
"   movl $0, %ebx\n"
"   movl $0, %ecx\n"
"   movl $0, %edx\n"
"   movl $0, %esi\n"
"   movl $0, %edi\n"
"   movl $0, %ebp\n"
"   ret\n"                 // jump to f
"   ud2\n"                 // should never get here
".previous\n"
);


/*
   Perform a clone system call.  clone is strange because it has
   fork()-like return-twice semantics, so it needs special
   handling here.

   Upon entry, we have:

      int (fn)(void*)     in 0+FSZ(%esp)
      void* child_stack   in 4+FSZ(%esp)
      int flags           in 8+FSZ(%esp)
      void* arg           in 12+FSZ(%esp)
      pid_t* child_tid    in 16+FSZ(%esp)
      pid_t* parent_tid   in 20+FSZ(%esp)
      void* tls_ptr       in 24+FSZ(%esp)

   System call requires:

      int    $__NR_clone  in %eax
      int    flags        in %ebx
      void*  child_stack  in %ecx
      pid_t* parent_tid   in %edx
      pid_t* child_tid    in %edi
      void*  tls_ptr      in %esi

   Returns an Int encoded in the linux-x86 way, not a SysRes.
 */
#define FSZ         "4+4+4+4" /* frame size = retaddr+ebx+edi+esi */
#define __NR_CLONE  VG_STRINGIFY(__NR_clone)
#define __NR_EXIT   VG_STRINGIFY(__NR_exit)

extern
Int do_syscall_clone_x86_linux ( Word (*fn)(void *),
                                 void* stack,
                                 Int   flags,
                                 void* arg,
                                 Int*  child_tid,
                                 Int*  parent_tid,
                                 vki_modify_ldt_t * );
asm(
".text\n"
"do_syscall_clone_x86_linux:\n"
"        push    %ebx\n"
"        push    %edi\n"
"        push    %esi\n"

         /* set up child stack with function and arg */
"        movl     4+"FSZ"(%esp), %ecx\n"    /* syscall arg2: child stack */
"        movl    12+"FSZ"(%esp), %ebx\n"    /* fn arg */
"        movl     0+"FSZ"(%esp), %eax\n"    /* fn */
"        lea     -8(%ecx), %ecx\n"          /* make space on stack */
"        movl    %ebx, 4(%ecx)\n"           /* fn arg */
"        movl    %eax, 0(%ecx)\n"           /* fn */

         /* get other args to clone */
"        movl     8+"FSZ"(%esp), %ebx\n"    /* syscall arg1: flags */
"        movl    20+"FSZ"(%esp), %edx\n"    /* syscall arg3: parent tid * */
"        movl    16+"FSZ"(%esp), %edi\n"    /* syscall arg5: child tid * */
"        movl    24+"FSZ"(%esp), %esi\n"    /* syscall arg4: tls_ptr * */
"        movl    $"__NR_CLONE", %eax\n"
"        int     $0x80\n"                   /* clone() */
"        testl   %eax, %eax\n"              /* child if retval == 0 */
"        jnz     1f\n"

         /* CHILD - call thread function */
"        popl    %eax\n"
"        call    *%eax\n"                   /* call fn */

         /* exit with result */
"        movl    %eax, %ebx\n"              /* arg1: return value from fn */
"        movl    $"__NR_EXIT", %eax\n"
"        int     $0x80\n"

         /* Hm, exit returned */
"        ud2\n"

"1:\n"   /* PARENT or ERROR */
"        pop     %esi\n"
"        pop     %edi\n"
"        pop     %ebx\n"
"        ret\n"
".previous\n"
);

#undef FSZ
#undef __NR_CLONE
#undef __NR_EXIT


// forward declarations
static void setup_child ( ThreadArchState*, ThreadArchState*, Bool );
static SysRes sys_set_thread_area ( ThreadId, vki_modify_ldt_t* );

/*
   When a client clones, we need to keep track of the new thread.  This means:
   1. allocate a ThreadId+ThreadState+stack for the thread

   2. initialize the thread's new VCPU state

   3. create the thread using the same args as the client requested,
      but using the scheduler entrypoint for EIP, and a separate stack
      for ESP.
 */
static SysRes do_clone ( ThreadId ptid,
                         UInt flags, Addr esp,
                         Int* parent_tidptr,
                         Int* child_tidptr,
                         vki_modify_ldt_t *tlsinfo)
{
   static const Bool debug = False;

   ThreadId     ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(ptid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   UWord*       stack;
   NSegment const* seg;
   SysRes       res;
   Int          eax;
   vki_sigset_t blockall, savedmask;

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(ptid));
   vg_assert(VG_(is_valid_tid)(ctid));

   stack = (UWord*)ML_(allocstack)(ctid);
   if (stack == NULL) {
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto out;
   }

   /* Copy register state

      Both parent and child return to the same place, and the code
      following the clone syscall works out which is which, so we
      don't need to worry about it.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL esp for the new thread, then
      it actually gets a copy of the parent's esp.
   */
   /* Note: the clone call done by the Quadrics Elan3 driver specifies
      clone flags of 0xF00, and it seems to rely on the assumption
      that the child inherits a copy of the parent's GDT.
      setup_child takes care of setting that up. */
   setup_child( &ctst->arch, &ptst->arch, True );

   /* Make sys_clone appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_EAX = 0;

   if (esp != 0)
      ctst->arch.vex.guest_ESP = esp;

   ctst->os_state.parent = ptid;

   /* inherit signal mask */
   ctst->sig_mask     = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* We don't really know where the client stack is, because it's
      allocated by the client.  The best we can do is look at the
      memory mappings and try to derive some useful information.  We
      assume that esp starts near its highest possible value, and can
      only go down to the start of the mmaped segment. */
   seg = VG_(am_find_nsegment)((Addr)esp);
   if (seg && seg->kind != SkResvn) {
      ctst->client_stack_highest_word = (Addr)VG_PGROUNDUP(esp);
      ctst->client_stack_szB = ctst->client_stack_highest_word - seg->start;

      VG_(register_stack)(seg->start, ctst->client_stack_highest_word);

      if (debug)
         VG_(printf)("tid %d: guessed client stack range %p-%p\n",
                     ctid, seg->start, VG_PGROUNDUP(esp));
   } else {
      VG_(message)(Vg_UserMsg, "!? New thread %d starts with ESP(%p) unmapped\n",
                   ctid, esp);
      ctst->client_stack_szB = 0;
   }

   /* Assume the clone will succeed, and tell any tool that wants to
      know that this thread has come into existence.  We cannot defer
      it beyond this point because sys_set_thread_area, just below,
      causes tCheck to assert by making references to the new ThreadId
      if we don't state the new thread exists prior to that point.
      If the clone fails, we'll send out a ll_exit notification for it
      at the out: label below, to clean up.
   */
   VG_TRACK ( pre_thread_ll_create, ptid, ctid );

   if (flags & VKI_CLONE_SETTLS) {
      if (debug)
         VG_(printf)("clone child has SETTLS: tls info at %p: idx=%d "
                     "base=%p limit=%x; esp=%p fs=%x gs=%x\n",
                     tlsinfo, tlsinfo->entry_number,
                     tlsinfo->base_addr, tlsinfo->limit,
                     ptst->arch.vex.guest_ESP,
                     ctst->arch.vex.guest_FS, ctst->arch.vex.guest_GS);
      res = sys_set_thread_area(ctid, tlsinfo);
      if (res.isError)
         goto out;
   }

   flags &= ~VKI_CLONE_SETTLS;

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Create the new thread */
   eax = do_syscall_clone_x86_linux(
            ML_(start_thread_NORETURN), stack, flags, &VG_(threads)[ctid],
            child_tidptr, parent_tidptr, NULL
         );
   res = VG_(mk_SysRes_x86_linux)( eax );

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

  out:
   if (res.isError) {
      /* clone failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   }

   return res;
}


/* ---------------------------------------------------------------------
   LDT/GDT simulation
   ------------------------------------------------------------------ */

/* Details of the LDT simulation
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   When a program runs natively, the linux kernel allows each *thread*
   in it to have its own LDT.  Almost all programs never do this --
   it's wildly unportable, after all -- and so the kernel never
   allocates the structure, which is just as well as an LDT occupies
   64k of memory (8192 entries of size 8 bytes).

   A thread may choose to modify its LDT entries, by doing the
   __NR_modify_ldt syscall.  In such a situation the kernel will then
   allocate an LDT structure for it.  Each LDT entry is basically a
   (base, limit) pair.  A virtual address in a specific segment is
   translated to a linear address by adding the segment's base value.
   In addition, the virtual address must not exceed the limit value.

   To use an LDT entry, a thread loads one of the segment registers
   (%cs, %ss, %ds, %es, %fs, %gs) with the index of the LDT entry (0
   .. 8191) it wants to use.  In fact, the required value is (index <<
   3) + 7, but that's not important right now.  Any normal instruction
   which includes an addressing mode can then be made relative to that
   LDT entry by prefixing the insn with a so-called segment-override
   prefix, a byte which indicates which of the 6 segment registers
   holds the LDT index.

   Now, a key constraint is that valgrind's address checks operate in
   terms of linear addresses.  So we have to explicitly translate
   virtual addrs into linear addrs, and that means doing a complete
   LDT simulation.

   Calls to modify_ldt are intercepted.  For each thread, we maintain
   an LDT (with the same normally-never-allocated optimisation that
   the kernel does).  This is updated as expected via calls to
   modify_ldt.

   When a thread does an amode calculation involving a segment
   override prefix, the relevant LDT entry for the thread is
   consulted.  It all works.

   There is a conceptual problem, which appears when switching back to
   native execution, either temporarily to pass syscalls to the
   kernel, or permanently, when debugging V.  Problem at such points
   is that it's pretty pointless to copy the simulated machine's
   segment registers to the real machine, because we'd also need to
   copy the simulated LDT into the real one, and that's prohibitively
   expensive.

   Fortunately it looks like no syscalls rely on the segment regs or
   LDT being correct, so we can get away with it.  Apart from that the
   simulation is pretty straightforward.  All 6 segment registers are
   tracked, although only %ds, %es, %fs and %gs are allowed as
   prefixes.  Perhaps it could be restricted even more than that -- I
   am not sure what is and isn't allowed in user-mode.
*/
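/* As an illustration of the "(index << 3) + 7" encoding mentioned
   above (assuming the standard x86 segment-selector layout): to use
   LDT entry 1, a client would load (1 << 3) + 7 = 0x000F into a
   segment register.  Bits 3..15 carry the index (1), bit 2 selects
   the LDT rather than the GDT, and bits 0..1 are the requested
   privilege level (3, i.e. user mode). */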
/* Translate a struct modify_ldt_ldt_s to a VexGuestX86SegDescr, using
   the Linux kernel's logic (cut-n-paste of code in
   linux/kernel/ldt.c). */

static
void translate_to_hw_format ( /* IN  */ vki_modify_ldt_t* inn,
                              /* OUT */ VexGuestX86SegDescr* out,
                                        Int oldmode )
{
   UInt entry_1, entry_2;
   vg_assert(8 == sizeof(VexGuestX86SegDescr));

   if (0)
      VG_(printf)("translate_to_hw_format: base %p, limit %d\n",
                  inn->base_addr, inn->limit );

   /* Allow LDTs to be cleared by the user. */
   if (inn->base_addr == 0 && inn->limit == 0) {
      if (oldmode ||
          (inn->contents == 0        &&
           inn->read_exec_only == 1  &&
           inn->seg_32bit == 0       &&
           inn->limit_in_pages == 0  &&
           inn->seg_not_present == 1 &&
           inn->useable == 0 )) {
         entry_1 = 0;
         entry_2 = 0;
         goto install;
      }
   }

   entry_1 = ((inn->base_addr & 0x0000ffff) << 16) |
             (inn->limit & 0x0ffff);
   entry_2 = (inn->base_addr & 0xff000000) |
             ((inn->base_addr & 0x00ff0000) >> 16) |
             (inn->limit & 0xf0000) |
             ((inn->read_exec_only ^ 1) << 9) |
             (inn->contents << 10) |
             ((inn->seg_not_present ^ 1) << 15) |
             (inn->seg_32bit << 22) |
             (inn->limit_in_pages << 23) |
             0x7000;
   if (!oldmode)
      entry_2 |= (inn->useable << 20);

   /* Install the new entry ... */
  install:
   out->LdtEnt.Words.word1 = entry_1;
   out->LdtEnt.Words.word2 = entry_2;
}
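/* A worked example of the packing above, for a hypothetical
   descriptor with base_addr 0x12345678, limit 0xABCDE, seg_32bit 1,
   limit_in_pages 1, useable 1 and all other fields 0 (not oldmode):

      entry_1 = (0x5678 << 16) | 0xBCDE = 0x5678BCDE
      entry_2 = 0x12000000 | 0x34 | 0xA0000 | 0x200 | 0x8000
                | 0x400000 | 0x800000 | 0x7000 | 0x100000
              = 0x12DAF234

   i.e. word1 holds base[15:0] and limit[15:0], while word2 holds the
   remaining base and limit bits plus the attribute flags. */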
/* Create a zeroed-out GDT. */
static VexGuestX86SegDescr* alloc_zeroed_x86_GDT ( void )
{
   Int nbytes = VEX_GUEST_X86_GDT_NENT * sizeof(VexGuestX86SegDescr);
   return VG_(arena_calloc)(VG_AR_CORE, nbytes, 1);
}

/* Create a zeroed-out LDT. */
static VexGuestX86SegDescr* alloc_zeroed_x86_LDT ( void )
{
   Int nbytes = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
   return VG_(arena_calloc)(VG_AR_CORE, nbytes, 1);
}

/* Free up an LDT or GDT allocated by the above fns. */
static void free_LDT_or_GDT ( VexGuestX86SegDescr* dt )
{
   vg_assert(dt);
   VG_(arena_free)(VG_AR_CORE, (void*)dt);
}

/* Copy contents between two existing LDTs. */
static void copy_LDT_from_to ( VexGuestX86SegDescr* src,
                               VexGuestX86SegDescr* dst )
{
   Int i;
   vg_assert(src);
   vg_assert(dst);
   for (i = 0; i < VEX_GUEST_X86_LDT_NENT; i++)
      dst[i] = src[i];
}

/* Copy contents between two existing GDTs. */
static void copy_GDT_from_to ( VexGuestX86SegDescr* src,
                               VexGuestX86SegDescr* dst )
{
   Int i;
   vg_assert(src);
   vg_assert(dst);
   for (i = 0; i < VEX_GUEST_X86_GDT_NENT; i++)
      dst[i] = src[i];
}

/* Free this thread's DTs, if it has any. */
static void deallocate_LGDTs_for_thread ( VexGuestX86State* vex )
{
   vg_assert(sizeof(HWord) == sizeof(void*));

   if (0)
      VG_(printf)("deallocate_LGDTs_for_thread: "
                  "ldt = 0x%x, gdt = 0x%x\n",
                  vex->guest_LDT, vex->guest_GDT );

   if (vex->guest_LDT != (HWord)NULL) {
      free_LDT_or_GDT( (VexGuestX86SegDescr*)vex->guest_LDT );
      vex->guest_LDT = (HWord)NULL;
   }

   if (vex->guest_GDT != (HWord)NULL) {
      free_LDT_or_GDT( (VexGuestX86SegDescr*)vex->guest_GDT );
      vex->guest_GDT = (HWord)NULL;
   }
}


/*
 * linux/kernel/ldt.c
 *
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

/*
 * read_ldt() is not really atomic - this is not a problem since
 * synchronization of reads and writes done to the LDT has to be
 * assured by user-space anyway.  Writes are atomic, to protect
 * the security checks done on new descriptors.
 */
static
SysRes read_ldt ( ThreadId tid, UChar* ptr, UInt bytecount )
{
   SysRes res;
   UInt   i, size;
   UChar* ldt;

   if (0)
      VG_(printf)("read_ldt: tid = %d, ptr = %p, bytecount = %d\n",
                  tid, ptr, bytecount );

   vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
   vg_assert(8 == sizeof(VexGuestX86SegDescr));

   ldt = (UChar*)(VG_(threads)[tid].arch.vex.guest_LDT);
   res = VG_(mk_SysRes_Success)( 0 );
   if (ldt == NULL)
      /* LDT not allocated, meaning all entries are null */
      goto out;

   size = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
   if (size > bytecount)
      size = bytecount;

   res = VG_(mk_SysRes_Success)( size );
   for (i = 0; i < size; i++)
      ptr[i] = ldt[i];

  out:
   return res;
}


static
SysRes write_ldt ( ThreadId tid, void* ptr, UInt bytecount, Int oldmode )
{
   SysRes res;
   VexGuestX86SegDescr* ldt;
   vki_modify_ldt_t* ldt_info;

   if (0)
      VG_(printf)("write_ldt: tid = %d, ptr = %p, "
                  "bytecount = %d, oldmode = %d\n",
                  tid, ptr, bytecount, oldmode );

   vg_assert(8 == sizeof(VexGuestX86SegDescr));
   vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));

   ldt      = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_LDT;
   ldt_info = (vki_modify_ldt_t*)ptr;

   res = VG_(mk_SysRes_Error)( VKI_EINVAL );
   if (bytecount != sizeof(vki_modify_ldt_t))
      goto out;

   res = VG_(mk_SysRes_Error)( VKI_EINVAL );
   if (ldt_info->entry_number >= VEX_GUEST_X86_LDT_NENT)
      goto out;
   if (ldt_info->contents == 3) {
      if (oldmode)
         goto out;
      if (ldt_info->seg_not_present == 0)
         goto out;
   }

   /* If this thread doesn't have an LDT, we'd better allocate it
      now. */
   if (ldt == (HWord)NULL) {
      ldt = alloc_zeroed_x86_LDT();
      VG_(threads)[tid].arch.vex.guest_LDT = (HWord)ldt;
   }

   /* Install the new entry ... */
   translate_to_hw_format ( ldt_info, &ldt[ldt_info->entry_number], oldmode );
   res = VG_(mk_SysRes_Success)( 0 );

  out:
   return res;
}


static SysRes sys_modify_ldt ( ThreadId tid,
                               Int func, void* ptr, UInt bytecount )
{
   SysRes ret = VG_(mk_SysRes_Error)( VKI_ENOSYS );

   switch (func) {
   case 0:
      ret = read_ldt(tid, ptr, bytecount);
      break;
   case 1:
      ret = write_ldt(tid, ptr, bytecount, 1);
      break;
   case 2:
      VG_(unimplemented)("sys_modify_ldt: func == 2");
      /* god knows what this is about */
      /* ret = read_default_ldt(ptr, bytecount); */
      /*UNREACHED*/
      break;
   case 0x11:
      ret = write_ldt(tid, ptr, bytecount, 0);
      break;
   }
   return ret;
}


static SysRes sys_set_thread_area ( ThreadId tid, vki_modify_ldt_t* info )
{
   Int idx;
   VexGuestX86SegDescr* gdt;

   vg_assert(8 == sizeof(VexGuestX86SegDescr));
   vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));

   if (info == NULL)
      return VG_(mk_SysRes_Error)( VKI_EFAULT );

   gdt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_GDT;

   /* If the thread doesn't have a GDT, allocate it now. */
   if (!gdt) {
      gdt = alloc_zeroed_x86_GDT();
      VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
   }

   idx = info->entry_number;

   if (idx == -1) {
      /* Find and use the first free entry. */
      for (idx = 0; idx < VEX_GUEST_X86_GDT_NENT; idx++) {
         if (gdt[idx].LdtEnt.Words.word1 == 0
             && gdt[idx].LdtEnt.Words.word2 == 0)
            break;
      }

      if (idx == VEX_GUEST_X86_GDT_NENT)
         return VG_(mk_SysRes_Error)( VKI_ESRCH );
   } else if (idx < 0 || idx >= VEX_GUEST_X86_GDT_NENT) {
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   translate_to_hw_format(info, &gdt[idx], 0);

   VG_TRACK( pre_mem_write, Vg_CoreSysCall, tid,
             "set_thread_area(info->entry)",
             (Addr) & info->entry_number, sizeof(unsigned int) );
   info->entry_number = idx;
   VG_TRACK( post_mem_write, Vg_CoreSysCall, tid,
             (Addr) & info->entry_number, sizeof(unsigned int) );

   return VG_(mk_SysRes_Success)( 0 );
}
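/* sys_get_thread_area (below) is the inverse of the packing done by
   translate_to_hw_format: it unpacks a GDT entry back into the
   user_desc fields.  Note that, unlike set_thread_area, the caller
   must supply a concrete entry_number here; there is no "-1 means
   pick the first free slot" handling. */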
static SysRes sys_get_thread_area ( ThreadId tid, vki_modify_ldt_t* info )
{
   Int idx;
   VexGuestX86SegDescr* gdt;

   vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
   vg_assert(8 == sizeof(VexGuestX86SegDescr));

   if (info == NULL)
      return VG_(mk_SysRes_Error)( VKI_EFAULT );

   idx = info->entry_number;

   if (idx < 0 || idx >= VEX_GUEST_X86_GDT_NENT)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   gdt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_GDT;

   /* If the thread doesn't have a GDT, allocate it now. */
   if (!gdt) {
      gdt = alloc_zeroed_x86_GDT();
      VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
   }

   info->base_addr       = ( gdt[idx].LdtEnt.Bits.BaseHi << 24 ) |
                           ( gdt[idx].LdtEnt.Bits.BaseMid << 16 ) |
                           gdt[idx].LdtEnt.Bits.BaseLow;
   info->limit           = ( gdt[idx].LdtEnt.Bits.LimitHi << 16 ) |
                           gdt[idx].LdtEnt.Bits.LimitLow;
   info->seg_32bit       = gdt[idx].LdtEnt.Bits.Default_Big;
   info->contents        = ( gdt[idx].LdtEnt.Bits.Type >> 2 ) & 0x3;
   info->read_exec_only  = ( gdt[idx].LdtEnt.Bits.Type & 0x1 ) ^ 0x1;
   info->limit_in_pages  = gdt[idx].LdtEnt.Bits.Granularity;
   info->seg_not_present = gdt[idx].LdtEnt.Bits.Pres ^ 0x1;
   info->useable         = gdt[idx].LdtEnt.Bits.Sys;
   info->reserved        = 0;

   return VG_(mk_SysRes_Success)( 0 );
}

/* ---------------------------------------------------------------------
   More thread stuff
   ------------------------------------------------------------------ */

void VG_(cleanup_thread) ( ThreadArchState* arch )
{
   /* Release arch-specific resources held by this thread. */
   /* On x86, we have to dump the LDT and GDT. */
   deallocate_LGDTs_for_thread( &arch->vex );
}


static void setup_child ( /*OUT*/ ThreadArchState *child,
                          /*IN*/  ThreadArchState *parent,
                          Bool inherit_parents_GDT )
{
   /* We inherit our parent's guest state. */
   child->vex = parent->vex;
   child->vex_shadow = parent->vex_shadow;

   /* We inherit our parent's LDT. */
   if (parent->vex.guest_LDT == (HWord)NULL) {
      /* We hope this is the common case. */
      child->vex.guest_LDT = (HWord)NULL;
   } else {
      /* No luck .. we have to take a copy of the parent's. */
      child->vex.guest_LDT = (HWord)alloc_zeroed_x86_LDT();
      copy_LDT_from_to( (VexGuestX86SegDescr*)parent->vex.guest_LDT,
                        (VexGuestX86SegDescr*)child->vex.guest_LDT );
   }

   /* Either we start with an empty GDT (the usual case) or inherit a
      copy of our parents' one (Quadrics Elan3 driver -style clone
      only). */
   child->vex.guest_GDT = (HWord)NULL;

   if (inherit_parents_GDT && parent->vex.guest_GDT != (HWord)NULL) {
      child->vex.guest_GDT = (HWord)alloc_zeroed_x86_GDT();
      copy_GDT_from_to( (VexGuestX86SegDescr*)parent->vex.guest_GDT,
                        (VexGuestX86SegDescr*)child->vex.guest_GDT );
   }
}


/* ---------------------------------------------------------------------
   PRE/POST wrappers for x86/Linux-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(x86_linux, name)
#define POST(name)      DEFN_POST_TEMPLATE(x86_linux, name)

/* Add prototypes for the wrappers declared here, so that gcc doesn't
   harass us for not having prototypes.  Really this is a kludge --
   the right thing to do is to make these wrappers 'static' since they
   aren't visible outside this file, but that requires even more macro
   magic. */
DECL_TEMPLATE(x86_linux, sys_socketcall);
DECL_TEMPLATE(x86_linux, sys_stat64);
DECL_TEMPLATE(x86_linux, sys_fstatat64);
DECL_TEMPLATE(x86_linux, sys_fstat64);
DECL_TEMPLATE(x86_linux, sys_lstat64);
DECL_TEMPLATE(x86_linux, sys_clone);
DECL_TEMPLATE(x86_linux, old_mmap);
DECL_TEMPLATE(x86_linux, sys_mmap2);
DECL_TEMPLATE(x86_linux, sys_sigreturn);
DECL_TEMPLATE(x86_linux, sys_ipc);
DECL_TEMPLATE(x86_linux, sys_rt_sigreturn);
DECL_TEMPLATE(x86_linux, sys_modify_ldt);
DECL_TEMPLATE(x86_linux, sys_set_thread_area);
DECL_TEMPLATE(x86_linux, sys_get_thread_area);
DECL_TEMPLATE(x86_linux, sys_ptrace);
DECL_TEMPLATE(x86_linux, sys_sigaction);
DECL_TEMPLATE(x86_linux, sys_sigsuspend);
DECL_TEMPLATE(x86_linux, old_select);
DECL_TEMPLATE(x86_linux, sys_vm86old);
DECL_TEMPLATE(x86_linux, sys_vm86);
DECL_TEMPLATE(x86_linux, sys_syscall223);

PRE(old_select)
{
   /* struct sel_arg_struct {
         unsigned long n;
         fd_set *inp, *outp, *exp;
         struct timeval *tvp;
      };
   */
   PRE_REG_READ1(long, "old_select", struct sel_arg_struct *, args);
   PRE_MEM_READ( "old_select(args)", ARG1, 5*sizeof(UWord) );
   *flags |= SfMayBlock;
   {
      UInt* arg_struct = (UInt*)ARG1;
      UInt a1, a2, a3, a4, a5;

      a1 = arg_struct[0];
      a2 = arg_struct[1];
      a3 = arg_struct[2];
      a4 = arg_struct[3];
      a5 = arg_struct[4];

      PRINT("old_select ( %d, %p, %p, %p, %p )", a1,a2,a3,a4,a5);
      if (a2 != (Addr)NULL)
         PRE_MEM_READ( "old_select(readfds)",   a2, a1/8 /* __FD_SETSIZE/8 */ );
      if (a3 != (Addr)NULL)
         PRE_MEM_READ( "old_select(writefds)",  a3, a1/8 /* __FD_SETSIZE/8 */ );
      if (a4 != (Addr)NULL)
         PRE_MEM_READ( "old_select(exceptfds)", a4, a1/8 /* __FD_SETSIZE/8 */ );
      if (a5 != (Addr)NULL)
         PRE_MEM_READ( "old_select(timeout)", a5, sizeof(struct vki_timeval) );
   }
}

PRE(sys_clone)
{
   UInt cloneflags;

   PRINT("sys_clone ( %x, %p, %p, %p, %p )",ARG1,ARG2,ARG3,ARG4,ARG5);
   PRE_REG_READ5(int, "clone",
                 unsigned long, flags,
                 void *, child_stack,
                 int *, parent_tidptr,
                 vki_modify_ldt_t *, tlsinfo,
                 int *, child_tidptr);

   if (ARG1 & VKI_CLONE_PARENT_SETTID) {
      PRE_MEM_WRITE("clone(parent_tidptr)", ARG3, sizeof(Int));
      if (!VG_(am_is_valid_for_client)(ARG3, sizeof(Int),
                                             VKI_PROT_WRITE)) {
         SET_STATUS_Failure( VKI_EFAULT );
         return;
      }
   }
   if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID)) {
      PRE_MEM_WRITE("clone(child_tidptr)", ARG5, sizeof(Int));
      if (!VG_(am_is_valid_for_client)(ARG5, sizeof(Int),
                                             VKI_PROT_WRITE)) {
         SET_STATUS_Failure( VKI_EFAULT );
         return;
      }
   }
   if (ARG1 & VKI_CLONE_SETTLS) {
      PRE_MEM_READ("clone(tls_user_desc)", ARG4, sizeof(vki_modify_ldt_t));
      if (!VG_(am_is_valid_for_client)(ARG4, sizeof(vki_modify_ldt_t),
                                             VKI_PROT_READ)) {
         SET_STATUS_Failure( VKI_EFAULT );
         return;
      }
   }

   cloneflags = ARG1;

   if (!ML_(client_signal_OK)(ARG1 & VKI_CSIGNAL)) {
      SET_STATUS_Failure( VKI_EINVAL );
      return;
   }

   /* Be ultra-paranoid and filter out any clone-variants we don't understand:
      - ??? specifies clone flags of 0x100011
      - ??? specifies clone flags of 0x1200011.
      - NPTL specifies clone flags of 0x7D0F00.
      - The Quadrics Elan3 driver specifies clone flags of 0xF00.
      - Newer Quadrics Elan3 drivers with NPTL support specify 0x410F00.
      Everything else is rejected.
   */
   if (
        1 ||
        /* 11 Nov 05: for the time being, disable this ultra-paranoia.
           The switch below probably does a good enough job. */
          (cloneflags == 0x100011 || cloneflags == 0x1200011
        || cloneflags == 0x7D0F00
        || cloneflags == 0x790F00
        || cloneflags == 0x3D0F00
        || cloneflags == 0x410F00
        || cloneflags == 0xF00
        || cloneflags == 0xF21)) {
      /* OK */
   }
   else {
      /* Nah.  We don't like it.  Go away. */
      goto reject;
   }

   /* Only look at the flags we really care about */
   switch (cloneflags & (VKI_CLONE_VM | VKI_CLONE_FS
                         | VKI_CLONE_FILES | VKI_CLONE_VFORK)) {
   case VKI_CLONE_VM | VKI_CLONE_FS | VKI_CLONE_FILES:
      /* thread creation */
      SET_STATUS_from_SysRes(
         do_clone(tid,
                  ARG1,          /* flags */
                  (Addr)ARG2,    /* child ESP */
                  (Int *)ARG3,   /* parent_tidptr */
                  (Int *)ARG5,   /* child_tidptr */
                  (vki_modify_ldt_t *)ARG4)); /* set_tls */
      break;

   case VKI_CLONE_VFORK | VKI_CLONE_VM: /* vfork */
      /* FALLTHROUGH - assume vfork == fork */
      cloneflags &= ~(VKI_CLONE_VFORK | VKI_CLONE_VM);

   case 0: /* plain fork */
      SET_STATUS_from_SysRes(
         ML_(do_fork_clone)(tid,
                            cloneflags,    /* flags */
                            (Int *)ARG3,   /* parent_tidptr */
                            (Int *)ARG5)); /* child_tidptr */
      break;

   default:
   reject:
      /* should we just ENOSYS? */
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "Unsupported clone() flags: 0x%x", ARG1);
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "The only supported clone() uses are:");
      VG_(message)(Vg_UserMsg, " - via a threads library (LinuxThreads or NPTL)");
      VG_(message)(Vg_UserMsg, " - via the implementation of fork or vfork");
      VG_(message)(Vg_UserMsg, " - for the Quadrics Elan3 user-space driver");
      VG_(unimplemented)
         ("Valgrind does not support general clone().");
   }

   if (SUCCESS) {
      if (ARG1 & VKI_CLONE_PARENT_SETTID)
         POST_MEM_WRITE(ARG3, sizeof(Int));
      if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID))
         POST_MEM_WRITE(ARG5, sizeof(Int));

      /* Thread creation was successful; let the child have the chance
         to run */
      *flags |= SfYieldAfter;
   }
}

PRE(sys_sigreturn)
{
   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */

   ThreadState* tst;
   PRINT("sys_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust esp to point to start of frame; skip back up over
      sigreturn sequence's "popl %eax" and handler ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_ESP -= sizeof(Addr)+sizeof(Word);
   /* XXX why does ESP change differ from rt_sigreturn case below? */

   /* This is only so that the EIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it */
   VG_(sigframe_destroy)(tid, False);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

PRE(sys_rt_sigreturn)
{
   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */

   ThreadState* tst;
   PRINT("sys_rt_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust esp to point to start of frame; skip back up over handler
      ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_ESP -= sizeof(Addr);
   /* XXX why does ESP change differ from sigreturn case above? */

   /* This is only so that the EIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it */
   VG_(sigframe_destroy)(tid, True);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

PRE(sys_modify_ldt)
{
   PRINT("sys_modify_ldt ( %d, %p, %d )", ARG1,ARG2,ARG3);
   PRE_REG_READ3(int, "modify_ldt", int, func, void *, ptr,
                 unsigned long, bytecount);

   if (ARG1 == 0) {
      /* read the LDT into ptr */
      PRE_MEM_WRITE( "modify_ldt(ptr)", ARG2, ARG3 );
   }
   if (ARG1 == 1 || ARG1 == 0x11) {
      /* write the LDT with the entry pointed at by ptr */
      PRE_MEM_READ( "modify_ldt(ptr)", ARG2, sizeof(vki_modify_ldt_t) );
   }
   /* "do" the syscall ourselves; the kernel never sees it */
   SET_STATUS_from_SysRes( sys_modify_ldt( tid, ARG1, (void*)ARG2, ARG3 ) );

   if (ARG1 == 0 && SUCCESS && RES > 0) {
      POST_MEM_WRITE( ARG2, RES );
   }
}

PRE(sys_set_thread_area)
{
   PRINT("sys_set_thread_area ( %p )", ARG1);
   PRE_REG_READ1(int, "set_thread_area", struct user_desc *, u_info)
   PRE_MEM_READ( "set_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );

   /* "do" the syscall ourselves; the kernel never sees it */
   SET_STATUS_from_SysRes( sys_set_thread_area( tid, (void *)ARG1 ) );
}

PRE(sys_get_thread_area)
{
   PRINT("sys_get_thread_area ( %p )", ARG1);
   PRE_REG_READ1(int, "get_thread_area", struct user_desc *, u_info)
   PRE_MEM_WRITE( "get_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );

   /* "do" the syscall ourselves; the kernel never sees it */
   SET_STATUS_from_SysRes( sys_get_thread_area( tid, (void *)ARG1 ) );

   if (SUCCESS) {
      POST_MEM_WRITE( ARG1, sizeof(vki_modify_ldt_t) );
   }
}

// Parts of this are x86-specific, but the *PEEK* cases are generic.
//
// ARG3 is only used for pointers into the traced process's address
// space and for offsets into the traced process's struct
// user_regs_struct. It is never a pointer into this process's memory
// space, and we should therefore not check anything it points to.
PRE(sys_ptrace)
{
   PRINT("sys_ptrace ( %d, %d, %p, %p )", ARG1,ARG2,ARG3,ARG4);
   PRE_REG_READ4(int, "ptrace",
                 long, request, long, pid, long, addr, long, data);
   switch (ARG1) {
   case VKI_PTRACE_PEEKTEXT:
   case VKI_PTRACE_PEEKDATA:
   case VKI_PTRACE_PEEKUSR:
      PRE_MEM_WRITE( "ptrace(peek)", ARG4,
                     sizeof (long));
      break;
   case VKI_PTRACE_GETREGS:
      PRE_MEM_WRITE( "ptrace(getregs)", ARG4,
                     sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_GETFPREGS:
      PRE_MEM_WRITE( "ptrace(getfpregs)", ARG4,
                     sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_GETFPXREGS:
      PRE_MEM_WRITE( "ptrace(getfpxregs)", ARG4,
                     sizeof(struct vki_user_fxsr_struct) );
      break;
   case VKI_PTRACE_SETREGS:
      PRE_MEM_READ( "ptrace(setregs)", ARG4,
                    sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_SETFPREGS:
      PRE_MEM_READ( "ptrace(setfpregs)", ARG4,
                    sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_SETFPXREGS:
      PRE_MEM_READ( "ptrace(setfpxregs)", ARG4,
                    sizeof(struct vki_user_fxsr_struct) );
      break;
   case VKI_PTRACE_GETEVENTMSG:
      PRE_MEM_WRITE( "ptrace(geteventmsg)", ARG4, sizeof(unsigned long));
      break;
   case VKI_PTRACE_GETSIGINFO:
      PRE_MEM_WRITE( "ptrace(getsiginfo)", ARG4, sizeof(vki_siginfo_t));
      break;
   case VKI_PTRACE_SETSIGINFO:
      PRE_MEM_READ( "ptrace(setsiginfo)", ARG4, sizeof(vki_siginfo_t));
      break;
   default:
      break;
   }
}

POST(sys_ptrace)
{
   switch (ARG1) {
   case VKI_PTRACE_PEEKTEXT:
   case VKI_PTRACE_PEEKDATA:
   case VKI_PTRACE_PEEKUSR:
      POST_MEM_WRITE( ARG4, sizeof (long));
      break;
   case VKI_PTRACE_GETREGS:
      POST_MEM_WRITE( ARG4, sizeof (struct vki_user_regs_struct));
      break;
   case VKI_PTRACE_GETFPREGS:
      POST_MEM_WRITE( ARG4, sizeof (struct vki_user_i387_struct));
      break;
   case VKI_PTRACE_GETFPXREGS:
      POST_MEM_WRITE( ARG4, sizeof(struct vki_user_fxsr_struct) );
      break;
   case VKI_PTRACE_GETEVENTMSG:
      POST_MEM_WRITE( ARG4, sizeof(unsigned long));
      break;
   case VKI_PTRACE_GETSIGINFO:
      /* XXX: This is a simplification. Different parts of the
       * siginfo_t are valid depending on the type of signal.
       */
      POST_MEM_WRITE( ARG4, sizeof(vki_siginfo_t));
      break;
   default:
      break;
   }
}

static Addr deref_Addr ( ThreadId tid, Addr a, Char* s )
{
   Addr* a_p = (Addr*)a;
   PRE_MEM_READ( s, (Addr)a_p, sizeof(Addr) );
   return *a_p;
}
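/* deref_Addr (above) fetches one guest pointer-sized word on the
   client's behalf: the PRE_MEM_READ tells the tool about the access
   (so e.g. memcheck can complain if the word is unaddressable or
   undefined) before the word is actually dereferenced.  It is used
   below to pick apart the indirect argument blocks of sys_ipc. */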
PRE(sys_ipc)
{
   PRINT("sys_ipc ( %d, %d, %d, %d, %p, %d )", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
   // XXX: this is simplistic -- some args are not used in all circumstances.
   PRE_REG_READ6(int, "ipc",
                 vki_uint, call, int, first, int, second, int, third,
                 void *, ptr, long, fifth)

   switch (ARG1 /* call */) {
   case VKI_SEMOP:
      ML_(generic_PRE_sys_semop)( tid, ARG2, ARG5, ARG3 );
      *flags |= SfMayBlock;
      break;
   case VKI_SEMGET:
      break;
   case VKI_SEMCTL:
   {
      UWord arg = deref_Addr( tid, ARG5, "semctl(arg)" );
      ML_(generic_PRE_sys_semctl)( tid, ARG2, ARG3, ARG4, arg );
      break;
   }
   case VKI_SEMTIMEDOP:
      ML_(generic_PRE_sys_semtimedop)( tid, ARG2, ARG5, ARG3, ARG6 );
      *flags |= SfMayBlock;
      break;
   case VKI_MSGSND:
      ML_(linux_PRE_sys_msgsnd)( tid, ARG2, ARG5, ARG3, ARG4 );
      if ((ARG4 & VKI_IPC_NOWAIT) == 0)
         *flags |= SfMayBlock;
      break;
   case VKI_MSGRCV:
   {
      Addr msgp;
      Word msgtyp;

      msgp = deref_Addr( tid,
                         (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgp),
                         "msgrcv(msgp)" );
      msgtyp = deref_Addr( tid,
                           (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgtyp),
                           "msgrcv(msgp)" );

      ML_(linux_PRE_sys_msgrcv)( tid, ARG2, msgp, ARG3, msgtyp, ARG4 );

      if ((ARG4 & VKI_IPC_NOWAIT) == 0)
         *flags |= SfMayBlock;
      break;
   }
   case VKI_MSGGET:
      break;
   case VKI_MSGCTL:
      ML_(linux_PRE_sys_msgctl)( tid, ARG2, ARG3, ARG5 );
      break;
   case VKI_SHMAT:
   {
      UWord w;
      PRE_MEM_WRITE( "shmat(raddr)", ARG4, sizeof(Addr) );
      w = ML_(generic_PRE_sys_shmat)( tid, ARG2, ARG5, ARG3 );
      if (w == 0)
         SET_STATUS_Failure( VKI_EINVAL );
      else
         ARG5 = w;
      break;
   }
   case VKI_SHMDT:
      if (!ML_(generic_PRE_sys_shmdt)(tid, ARG5))
         SET_STATUS_Failure( VKI_EINVAL );
      break;
   case VKI_SHMGET:
      break;
   case VKI_SHMCTL: /* IPCOP_shmctl */
      ML_(generic_PRE_sys_shmctl)( tid, ARG2, ARG3, ARG5 );
      break;
   default:
      VG_(message)(Vg_DebugMsg, "FATAL: unhandled syscall(ipc) %d", ARG1 );
      VG_(core_panic)("... bye!\n");
      break; /*NOTREACHED*/
   }
}

POST(sys_ipc)
{
   vg_assert(SUCCESS);
   switch (ARG1 /* call */) {
   case VKI_SEMOP:
   case VKI_SEMGET:
      break;
   case VKI_SEMCTL:
   {
      UWord arg = deref_Addr( tid, ARG5, "semctl(arg)" );
      ML_(generic_PRE_sys_semctl)( tid, ARG2, ARG3, ARG4, arg );
      break;
   }
   case VKI_SEMTIMEDOP:
   case VKI_MSGSND:
      break;
   case VKI_MSGRCV:
   {
      Addr msgp;
      Word msgtyp;

      msgp = deref_Addr( tid,
                         (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgp),
                         "msgrcv(msgp)" );
      msgtyp = deref_Addr( tid,
                           (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgtyp),
                           "msgrcv(msgp)" );

      ML_(linux_POST_sys_msgrcv)( tid, RES, ARG2, msgp, ARG3, msgtyp, ARG4 );
      break;
   }
   case VKI_MSGGET:
      break;
   case VKI_MSGCTL:
      ML_(linux_POST_sys_msgctl)( tid, RES, ARG2, ARG3, ARG5 );
      break;
   case VKI_SHMAT:
   {
      Addr addr;
      /* force readability. before the syscall it is
       * indeed uninitialized, as can be seen in
       * glibc/sysdeps/unix/sysv/linux/shmat.c */
      POST_MEM_WRITE( ARG4, sizeof( Addr ) );

      addr = deref_Addr ( tid, ARG4, "shmat(addr)" );
      ML_(generic_POST_sys_shmat)( tid, addr, ARG2, ARG5, ARG3 );
      break;
   }
   case VKI_SHMDT:
      ML_(generic_POST_sys_shmdt)( tid, RES, ARG5 );
      break;
   case VKI_SHMGET:
      break;
   case VKI_SHMCTL:
      ML_(generic_POST_sys_shmctl)( tid, RES, ARG2, ARG3, ARG5 );
      break;
   default:
      VG_(message)(Vg_DebugMsg,
                   "FATAL: unhandled syscall(ipc) %d",
                   ARG1 );
      VG_(core_panic)("... bye!\n");
      break; /*NOTREACHED*/
   }
}

PRE(old_mmap)
{
   /* struct mmap_arg_struct {
         unsigned long addr;
         unsigned long len;
         unsigned long prot;
         unsigned long flags;
         unsigned long fd;
         unsigned long offset;
   }; */
   UWord a1, a2, a3, a4, a5, a6;
   SysRes r;

   UWord* args = (UWord*)ARG1;
   PRE_REG_READ1(long, "old_mmap", struct mmap_arg_struct *, args);
   PRE_MEM_READ( "old_mmap(args)", (Addr)args, 6*sizeof(UWord) );

   a1 = args[1-1];
   a2 = args[2-1];
   a3 = args[3-1];
   a4 = args[4-1];
   a5 = args[5-1];
   a6 = args[6-1];

   PRINT("old_mmap ( %p, %llu, %d, %d, %d, %d )",
         a1, (ULong)a2, a3, a4, a5, a6 );

   r = ML_(generic_PRE_sys_mmap)( tid, a1, a2, a3, a4, a5, (Off64T)a6 );
   SET_STATUS_from_SysRes(r);
}

PRE(sys_mmap2)
{
   SysRes r;

   // Exactly like old_mmap() except:
   //  - all 6 args are passed in regs, rather than in a memory-block.
   //  - the file offset is specified in pagesize units rather than bytes,
   //    so that it can be used for files bigger than 2^32 bytes.
   // pagesize or 4K-size units in offset?  For ppc32/64-linux, this is
   // 4K-sized.  Assert that the page size is 4K here for safety.
   vg_assert(VKI_PAGE_SIZE == 4096);
   PRINT("sys_mmap2 ( %p, %llu, %d, %d, %d, %d )",
         ARG1, (ULong)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(long, "mmap2",
                 unsigned long, start, unsigned long, length,
                 unsigned long, prot,  unsigned long, flags,
                 unsigned long, fd,    unsigned long, offset);

   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5,
                                       4096 * (Off64T)ARG6 );
   SET_STATUS_from_SysRes(r);
}
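/* For example (hypothetical values): a client call
   mmap2(0, 8192, PROT_READ, MAP_PRIVATE, fd, 3) maps starting at byte
   offset 3 * 4096 = 12288 of fd, since ARG6 counts 4096-byte units
   rather than bytes. */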
// XXX: lstat64/fstat64/stat64 are generic, but not necessarily
// applicable to every architecture -- I think only to 32-bit archs.
// We're going to need something like linux/core_os32.h for such
// things, eventually, I think.  --njn
PRE(sys_lstat64)
{
   PRINT("sys_lstat64 ( %p(%s), %p )",ARG1,ARG1,ARG2);
   PRE_REG_READ2(long, "lstat64", char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "lstat64(file_name)", ARG1 );
   PRE_MEM_WRITE( "lstat64(buf)", ARG2, sizeof(struct vki_stat64) );
}

POST(sys_lstat64)
{
   vg_assert(SUCCESS);
   if (RES == 0) {
      POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
   }
}

PRE(sys_stat64)
{
   PRINT("sys_stat64 ( %p(%s), %p )",ARG1,ARG1,ARG2);
   PRE_REG_READ2(long, "stat64", char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "stat64(file_name)", ARG1 );
   PRE_MEM_WRITE( "stat64(buf)", ARG2, sizeof(struct vki_stat64) );
}

POST(sys_stat64)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}

PRE(sys_fstatat64)
{
   PRINT("sys_fstatat64 ( %d, %p(%s), %p )",ARG1,ARG2,ARG2,ARG3);
   PRE_REG_READ3(long, "fstatat64",
                 int, dfd, char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "fstatat64(file_name)", ARG2 );
   PRE_MEM_WRITE( "fstatat64(buf)", ARG3, sizeof(struct vki_stat64) );
}

POST(sys_fstatat64)
{
   POST_MEM_WRITE( ARG3, sizeof(struct vki_stat64) );
}

PRE(sys_fstat64)
{
   PRINT("sys_fstat64 ( %d, %p )",ARG1,ARG2);
   PRE_REG_READ2(long, "fstat64", unsigned long, fd, struct stat64 *, buf);
   PRE_MEM_WRITE( "fstat64(buf)", ARG2, sizeof(struct vki_stat64) );
}

POST(sys_fstat64)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}

PRE(sys_socketcall)
{
#  define ARG2_0  (((UWord*)ARG2)[0])
#  define ARG2_1  (((UWord*)ARG2)[1])
#  define ARG2_2  (((UWord*)ARG2)[2])
#  define ARG2_3  (((UWord*)ARG2)[3])
#  define ARG2_4  (((UWord*)ARG2)[4])
#  define ARG2_5  (((UWord*)ARG2)[5])

   *flags |= SfMayBlock;
   PRINT("sys_socketcall ( %d, %p )",ARG1,ARG2);
   PRE_REG_READ2(long, "socketcall", int, call, unsigned long *, args);

   switch (ARG1 /* request */) {

   case VKI_SYS_SOCKETPAIR:
      /* int socketpair(int d, int type, int protocol, int sv[2]); */
      PRE_MEM_READ( "socketcall.socketpair(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_socketpair)( tid, ARG2_0, ARG2_1, ARG2_2, ARG2_3 );
      break;

   case VKI_SYS_SOCKET:
      /* int socket(int domain, int type, int protocol); */
      PRE_MEM_READ( "socketcall.socket(args)", ARG2, 3*sizeof(Addr) );
      break;

   case VKI_SYS_BIND:
      /* int bind(int sockfd, struct sockaddr *my_addr,
                  int addrlen); */
      PRE_MEM_READ( "socketcall.bind(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_bind)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_LISTEN:
      /* int listen(int s, int backlog); */
      PRE_MEM_READ( "socketcall.listen(args)", ARG2, 2*sizeof(Addr) );
      break;

   case VKI_SYS_ACCEPT: {
      /* int accept(int s, struct sockaddr *addr, int *addrlen); */
      PRE_MEM_READ( "socketcall.accept(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_accept)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;
   }

   case VKI_SYS_SENDTO:
      /* int sendto(int s, const void *msg, int len,
                    unsigned int flags,
                    const struct sockaddr *to, int tolen); */
      PRE_MEM_READ( "socketcall.sendto(args)", ARG2, 6*sizeof(Addr) );
      ML_(generic_PRE_sys_sendto)( tid, ARG2_0, ARG2_1, ARG2_2,
                                   ARG2_3, ARG2_4, ARG2_5 );
      break;

   case VKI_SYS_SEND:
      /* int send(int s, const void *msg, size_t len, int flags); */
      PRE_MEM_READ( "socketcall.send(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_send)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_RECVFROM:
      /* int recvfrom(int s, void *buf, int len, unsigned int flags,
                      struct sockaddr *from, int *fromlen); */
      PRE_MEM_READ( "socketcall.recvfrom(args)", ARG2, 6*sizeof(Addr) );
      ML_(generic_PRE_sys_recvfrom)( tid, ARG2_0, ARG2_1, ARG2_2,
                                     ARG2_3, ARG2_4, ARG2_5 );
      break;

   case VKI_SYS_RECV:
      /* int recv(int s, void *buf, int len, unsigned int flags); */
      /* man 2 recv says:
         The recv call is normally used only on a connected socket
         (see connect(2)) and is identical to recvfrom with a NULL
         from parameter.
      */
      PRE_MEM_READ( "socketcall.recv(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_recv)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_CONNECT:
      /* int connect(int sockfd,
                     struct sockaddr *serv_addr, int addrlen ); */
      PRE_MEM_READ( "socketcall.connect(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_connect)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_SETSOCKOPT:
      /* int setsockopt(int s, int level, int optname,
                        const void *optval, int optlen); */
      PRE_MEM_READ( "socketcall.setsockopt(args)", ARG2, 5*sizeof(Addr) );
      ML_(generic_PRE_sys_setsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
                                       ARG2_3, ARG2_4 );
      break;

   case VKI_SYS_GETSOCKOPT:
      /* int getsockopt(int s, int level, int optname,
                        void *optval, socklen_t *optlen); */
      PRE_MEM_READ( "socketcall.getsockopt(args)", ARG2, 5*sizeof(Addr) );
      ML_(generic_PRE_sys_getsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
                                       ARG2_3, ARG2_4 );
      break;

   case VKI_SYS_GETSOCKNAME:
      /* int getsockname(int s, struct sockaddr* name, int* namelen) */
      PRE_MEM_READ( "socketcall.getsockname(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_getsockname)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_GETPEERNAME:
      /* int getpeername(int s, struct sockaddr* name, int* namelen) */
      PRE_MEM_READ( "socketcall.getpeername(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_getpeername)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_SHUTDOWN:
      /* int shutdown(int s, int how); */
      PRE_MEM_READ( "socketcall.shutdown(args)", ARG2, 2*sizeof(Addr) );
      break;

   case VKI_SYS_SENDMSG: {
      /* int sendmsg(int s, const struct msghdr *msg, int flags); */

      /* this causes warnings, and I don't get why. glibc bug?
       * (after all it's glibc providing the arguments array)
       PRE_MEM_READ( "socketcall.sendmsg(args)", ARG2, 3*sizeof(Addr) );
      */
      ML_(generic_PRE_sys_sendmsg)( tid, ARG2_0, ARG2_1 );
      break;
   }

   case VKI_SYS_RECVMSG: {
      /* int recvmsg(int s, struct msghdr *msg, int flags); */

      /* this causes warnings, and I don't get why. glibc bug?
       * (after all it's glibc providing the arguments array)
       PRE_MEM_READ("socketcall.recvmsg(args)", ARG2, 3*sizeof(Addr) );
      */
      ML_(generic_PRE_sys_recvmsg)( tid, ARG2_0, ARG2_1 );
      break;
   }

   default:
      VG_(message)(Vg_DebugMsg,"Warning: unhandled socketcall 0x%x",ARG1);
      SET_STATUS_Failure( VKI_EINVAL );
      break;
   }
#  undef ARG2_0
#  undef ARG2_1
#  undef ARG2_2
#  undef ARG2_3
#  undef ARG2_4
#  undef ARG2_5
}

POST(sys_socketcall)
{
#  define ARG2_0  (((UWord*)ARG2)[0])
#  define ARG2_1  (((UWord*)ARG2)[1])
#  define ARG2_2  (((UWord*)ARG2)[2])
#  define ARG2_3  (((UWord*)ARG2)[3])
#  define ARG2_4  (((UWord*)ARG2)[4])
#  define ARG2_5  (((UWord*)ARG2)[5])

   SysRes r;
   vg_assert(SUCCESS);
   switch (ARG1 /* request */) {

   case VKI_SYS_SOCKETPAIR:
      r = ML_(generic_POST_sys_socketpair)(
             tid, VG_(mk_SysRes_Success)(RES),
             ARG2_0, ARG2_1, ARG2_2, ARG2_3
          );
      SET_STATUS_from_SysRes(r);
      break;

   case VKI_SYS_SOCKET:
      r = ML_(generic_POST_sys_socket)( tid, VG_(mk_SysRes_Success)(RES) );
      SET_STATUS_from_SysRes(r);
      break;

   case VKI_SYS_BIND:
      /* int bind(int sockfd, struct sockaddr *my_addr,
                  int addrlen); */
      break;

   case VKI_SYS_LISTEN:
      /* int listen(int s, int backlog); */
      break;

   case VKI_SYS_ACCEPT:
      /* int accept(int s, struct sockaddr *addr, int *addrlen); */
      r = ML_(generic_POST_sys_accept)( tid, VG_(mk_SysRes_Success)(RES),
                                        ARG2_0, ARG2_1, ARG2_2 );
      SET_STATUS_from_SysRes(r);
      break;

   case VKI_SYS_SENDTO:
      break;

   case VKI_SYS_SEND:
      break;

   case VKI_SYS_RECVFROM:
      ML_(generic_POST_sys_recvfrom)( tid, VG_(mk_SysRes_Success)(RES),
                                      ARG2_0, ARG2_1, ARG2_2,
                                      ARG2_3, ARG2_4, ARG2_5 );
      break;

   case VKI_SYS_RECV:
      ML_(generic_POST_sys_recv)( tid, RES, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_CONNECT:
      break;

   case VKI_SYS_SETSOCKOPT:
      break;

   case VKI_SYS_GETSOCKOPT:
      ML_(generic_POST_sys_getsockopt)( tid, VG_(mk_SysRes_Success)(RES),
                                        ARG2_0, ARG2_1,
                                        ARG2_2, ARG2_3, ARG2_4 );
      break;

   case VKI_SYS_GETSOCKNAME:
      ML_(generic_POST_sys_getsockname)( tid, VG_(mk_SysRes_Success)(RES),
                                         ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_GETPEERNAME:
      ML_(generic_POST_sys_getpeername)( tid, VG_(mk_SysRes_Success)(RES),
                                         ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_SHUTDOWN:
      break;

   case VKI_SYS_SENDMSG:
      break;

   case VKI_SYS_RECVMSG:
      ML_(generic_POST_sys_recvmsg)( tid, ARG2_0, ARG2_1 );
      break;

   default:
      VG_(message)(Vg_DebugMsg,"FATAL: unhandled socketcall 0x%x",ARG1);
      VG_(core_panic)("... bye!\n");
bye!\n"); 1664 break; /*NOTREACHED*/ 1665 } 1666# undef ARG2_0 1667# undef ARG2_1 1668# undef ARG2_2 1669# undef ARG2_3 1670# undef ARG2_4 1671# undef ARG2_5 1672} 1673 1674/* Convert from non-RT to RT sigset_t's */ 1675static 1676void convert_sigset_to_rt(const vki_old_sigset_t *oldset, vki_sigset_t *set) 1677{ 1678 VG_(sigemptyset)(set); 1679 set->sig[0] = *oldset; 1680} 1681PRE(sys_sigaction) 1682{ 1683 struct vki_sigaction new, old; 1684 struct vki_sigaction *newp, *oldp; 1685 1686 PRINT("sys_sigaction ( %d, %p, %p )", ARG1,ARG2,ARG3); 1687 PRE_REG_READ3(int, "sigaction", 1688 int, signum, const struct old_sigaction *, act, 1689 struct old_sigaction *, oldact); 1690 1691 newp = oldp = NULL; 1692 1693 if (ARG2 != 0) { 1694 struct vki_old_sigaction *sa = (struct vki_old_sigaction *)ARG2; 1695 PRE_MEM_READ( "sigaction(act->sa_handler)", (Addr)&sa->ksa_handler, sizeof(sa->ksa_handler)); 1696 PRE_MEM_READ( "sigaction(act->sa_mask)", (Addr)&sa->sa_mask, sizeof(sa->sa_mask)); 1697 PRE_MEM_READ( "sigaction(act->sa_flags)", (Addr)&sa->sa_flags, sizeof(sa->sa_flags)); 1698 if (ML_(safe_to_deref)(sa,sizeof(sa)) 1699 && (sa->sa_flags & VKI_SA_RESTORER)) 1700 PRE_MEM_READ( "sigaction(act->sa_restorer)", (Addr)&sa->sa_restorer, sizeof(sa->sa_restorer)); 1701 } 1702 1703 if (ARG3 != 0) { 1704 PRE_MEM_WRITE( "sigaction(oldact)", ARG3, sizeof(struct vki_old_sigaction)); 1705 oldp = &old; 1706 } 1707 1708 //jrs 20050207: what?! how can this make any sense? 1709 //if (VG_(is_kerror)(SYSRES)) 1710 // return; 1711 1712 if (ARG2 != 0) { 1713 struct vki_old_sigaction *oldnew = (struct vki_old_sigaction *)ARG2; 1714 1715 new.ksa_handler = oldnew->ksa_handler; 1716 new.sa_flags = oldnew->sa_flags; 1717 new.sa_restorer = oldnew->sa_restorer; 1718 convert_sigset_to_rt(&oldnew->sa_mask, &new.sa_mask); 1719 newp = &new; 1720 } 1721 1722 SET_STATUS_from_SysRes( VG_(do_sys_sigaction)(ARG1, newp, oldp) ); 1723 1724 if (ARG3 != 0 && SUCCESS && RES == 0) { 1725 struct vki_old_sigaction *oldold = (struct vki_old_sigaction *)ARG3; 1726 1727 oldold->ksa_handler = oldp->ksa_handler; 1728 oldold->sa_flags = oldp->sa_flags; 1729 oldold->sa_restorer = oldp->sa_restorer; 1730 oldold->sa_mask = oldp->sa_mask.sig[0]; 1731 } 1732} 1733 1734POST(sys_sigaction) 1735{ 1736 vg_assert(SUCCESS); 1737 if (RES == 0 && ARG3 != 0) 1738 POST_MEM_WRITE( ARG3, sizeof(struct vki_old_sigaction)); 1739} 1740 1741PRE(sys_sigsuspend) 1742{ 1743 /* The C library interface to sigsuspend just takes a pointer to 1744 a signal mask but this system call has three arguments - the first 1745 two don't appear to be used by the kernel and are always passed as 1746 zero by glibc and the third is the first word of the signal mask 1747 so only 32 signals are supported. 1748 1749 In fact glibc normally uses rt_sigsuspend if it is available as 1750 that takes a pointer to the signal mask so supports more signals. 
    */
   *flags |= SfMayBlock;
   PRINT("sys_sigsuspend ( %d, %d, %d )", ARG1,ARG2,ARG3 );
   PRE_REG_READ3(int, "sigsuspend",
                 int, history0, int, history1,
                 vki_old_sigset_t, mask);
}

PRE(sys_vm86old)
{
   PRINT("sys_vm86old ( %p )", ARG1);
   PRE_REG_READ1(int, "vm86old", struct vm86_struct *, info);
   PRE_MEM_WRITE( "vm86old(info)", ARG1, sizeof(struct vki_vm86_struct));
}

POST(sys_vm86old)
{
   POST_MEM_WRITE( ARG1, sizeof(struct vki_vm86_struct));
}

PRE(sys_vm86)
{
   PRINT("sys_vm86 ( %d, %p )", ARG1,ARG2);
   PRE_REG_READ2(int, "vm86", unsigned long, fn, struct vm86plus_struct *, v86);
   if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
      PRE_MEM_WRITE( "vm86(v86)", ARG2, sizeof(struct vki_vm86plus_struct));
}

POST(sys_vm86)
{
   if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
      POST_MEM_WRITE( ARG2, sizeof(struct vki_vm86plus_struct));
}


/* ---------------------------------------------------------------
   PRE/POST wrappers for x86/Linux-variant specific syscalls
   ------------------------------------------------------------ */

PRE(sys_syscall223)
{
   Int err;

   /* 223 is used by sys_bproc.  If we're not on a declared bproc
      variant, fail in the usual way. */

   if (!VG_(strstr)(VG_(clo_kernel_variant), "bproc")) {
      PRINT("non-existent syscall! (syscall 223)");
      PRE_REG_READ0(long, "ni_syscall(223)");
      SET_STATUS_Failure( VKI_ENOSYS );
      return;
   }

   err = ML_(linux_variant_PRE_sys_bproc)( ARG1, ARG2, ARG3,
                                           ARG4, ARG5, ARG6 );
   if (err) {
      SET_STATUS_Failure( err );
      return;
   }
   /* Let it go through. */
   *flags |= SfMayBlock; /* who knows?  play safe. */
}

POST(sys_syscall223)
{
   ML_(linux_variant_POST_sys_bproc)( ARG1, ARG2, ARG3,
                                      ARG4, ARG5, ARG6 );
}

#undef PRE
#undef POST


/* ---------------------------------------------------------------------
   The x86/Linux syscall table
   ------------------------------------------------------------------ */

/* Add an x86-linux specific wrapper to a syscall table. */
#define PLAX_(sysno, name)    WRAPPER_ENTRY_X_(x86_linux, sysno, name)
#define PLAXY(sysno, name)    WRAPPER_ENTRY_XY(x86_linux, sysno, name)


// This table maps from __NR_xxx syscall numbers (from
// linux/include/asm-i386/unistd.h) to the appropriate PRE/POST sys_foo()
// wrappers on x86 (as per sys_call_table in linux/arch/i386/kernel/entry.S).
//
// For those syscalls not handled by Valgrind, the annotation indicates its
// arch/OS combination, eg. */* (generic), */Linux (Linux only), ?/?
// (unknown).
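// Reading the table below: entries built with the *XY macros install
// both a PRE and a POST wrapper, while *X_ entries have a PRE wrapper
// only.  GEN* denotes the generic wrappers, LIN* the Linux-wide ones,
// and PLAX_/PLAXY (defined just above) the x86/Linux-specific ones
// from this file.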
1840 1841const SyscallTableEntry ML_(syscall_table)[] = { 1842//zz // (restart_syscall) // 0 1843 GENX_(__NR_exit, sys_exit), // 1 1844 GENX_(__NR_fork, sys_fork), // 2 1845 GENXY(__NR_read, sys_read), // 3 1846 GENX_(__NR_write, sys_write), // 4 1847 1848 GENXY(__NR_open, sys_open), // 5 1849 GENXY(__NR_close, sys_close), // 6 1850 GENXY(__NR_waitpid, sys_waitpid), // 7 1851 GENXY(__NR_creat, sys_creat), // 8 1852 GENX_(__NR_link, sys_link), // 9 1853 1854 GENX_(__NR_unlink, sys_unlink), // 10 1855 GENX_(__NR_execve, sys_execve), // 11 1856 GENX_(__NR_chdir, sys_chdir), // 12 1857 GENXY(__NR_time, sys_time), // 13 1858 GENX_(__NR_mknod, sys_mknod), // 14 1859 1860 GENX_(__NR_chmod, sys_chmod), // 15 1861//zz LINX_(__NR_lchown, sys_lchown16), // 16 1862 GENX_(__NR_break, sys_ni_syscall), // 17 1863//zz // (__NR_oldstat, sys_stat), // 18 (obsolete) 1864 LINX_(__NR_lseek, sys_lseek), // 19 1865 1866 GENX_(__NR_getpid, sys_getpid), // 20 1867 LINX_(__NR_mount, sys_mount), // 21 1868 LINX_(__NR_umount, sys_oldumount), // 22 1869 LINX_(__NR_setuid, sys_setuid16), // 23 ## P 1870 LINX_(__NR_getuid, sys_getuid16), // 24 ## P 1871//zz 1872//zz // (__NR_stime, sys_stime), // 25 * (SVr4,SVID,X/OPEN) 1873 PLAXY(__NR_ptrace, sys_ptrace), // 26 1874 GENX_(__NR_alarm, sys_alarm), // 27 1875//zz // (__NR_oldfstat, sys_fstat), // 28 * L -- obsolete 1876 GENX_(__NR_pause, sys_pause), // 29 1877 1878 LINX_(__NR_utime, sys_utime), // 30 1879 GENX_(__NR_stty, sys_ni_syscall), // 31 1880 GENX_(__NR_gtty, sys_ni_syscall), // 32 1881 GENX_(__NR_access, sys_access), // 33 1882 GENX_(__NR_nice, sys_nice), // 34 1883 1884 GENX_(__NR_ftime, sys_ni_syscall), // 35 1885 GENX_(__NR_sync, sys_sync), // 36 1886 GENX_(__NR_kill, sys_kill), // 37 1887 GENX_(__NR_rename, sys_rename), // 38 1888 GENX_(__NR_mkdir, sys_mkdir), // 39 1889 1890 GENX_(__NR_rmdir, sys_rmdir), // 40 1891 GENXY(__NR_dup, sys_dup), // 41 1892 LINXY(__NR_pipe, sys_pipe), // 42 1893 GENXY(__NR_times, sys_times), // 43 1894 GENX_(__NR_prof, sys_ni_syscall), // 44 1895//zz 1896 GENX_(__NR_brk, sys_brk), // 45 1897 LINX_(__NR_setgid, sys_setgid16), // 46 1898 LINX_(__NR_getgid, sys_getgid16), // 47 1899//zz // (__NR_signal, sys_signal), // 48 */* (ANSI C) 1900 LINX_(__NR_geteuid, sys_geteuid16), // 49 1901 1902 LINX_(__NR_getegid, sys_getegid16), // 50 1903 GENX_(__NR_acct, sys_acct), // 51 1904 LINX_(__NR_umount2, sys_umount), // 52 1905 GENX_(__NR_lock, sys_ni_syscall), // 53 1906 GENXY(__NR_ioctl, sys_ioctl), // 54 1907 1908 GENXY(__NR_fcntl, sys_fcntl), // 55 1909 GENX_(__NR_mpx, sys_ni_syscall), // 56 1910 GENX_(__NR_setpgid, sys_setpgid), // 57 1911 GENX_(__NR_ulimit, sys_ni_syscall), // 58 1912//zz // (__NR_oldolduname, sys_olduname), // 59 Linux -- obsolete 1913//zz 1914 GENX_(__NR_umask, sys_umask), // 60 1915 GENX_(__NR_chroot, sys_chroot), // 61 1916//zz // (__NR_ustat, sys_ustat) // 62 SVr4 -- deprecated 1917 GENXY(__NR_dup2, sys_dup2), // 63 1918 GENX_(__NR_getppid, sys_getppid), // 64 1919 1920 GENX_(__NR_getpgrp, sys_getpgrp), // 65 1921 GENX_(__NR_setsid, sys_setsid), // 66 1922 PLAXY(__NR_sigaction, sys_sigaction), // 67 1923//zz // (__NR_sgetmask, sys_sgetmask), // 68 */* (ANSI C) 1924//zz // (__NR_ssetmask, sys_ssetmask), // 69 */* (ANSI C) 1925//zz 1926 LINX_(__NR_setreuid, sys_setreuid16), // 70 1927 LINX_(__NR_setregid, sys_setregid16), // 71 1928 PLAX_(__NR_sigsuspend, sys_sigsuspend), // 72 1929 LINXY(__NR_sigpending, sys_sigpending), // 73 1930//zz // (__NR_sethostname, sys_sethostname), // 74 */* 1931//zz 1932 
GENX_(__NR_setrlimit, sys_setrlimit), // 75 1933 GENXY(__NR_getrlimit, sys_old_getrlimit), // 76 1934 GENXY(__NR_getrusage, sys_getrusage), // 77 1935 GENXY(__NR_gettimeofday, sys_gettimeofday), // 78 1936 GENX_(__NR_settimeofday, sys_settimeofday), // 79 1937 1938 LINXY(__NR_getgroups, sys_getgroups16), // 80 1939 LINX_(__NR_setgroups, sys_setgroups16), // 81 1940 PLAX_(__NR_select, old_select), // 82 1941 GENX_(__NR_symlink, sys_symlink), // 83 1942//zz // (__NR_oldlstat, sys_lstat), // 84 -- obsolete 1943//zz 1944 GENX_(__NR_readlink, sys_readlink), // 85 1945//zz // (__NR_uselib, sys_uselib), // 86 */Linux 1946//zz // (__NR_swapon, sys_swapon), // 87 */Linux 1947//zz // (__NR_reboot, sys_reboot), // 88 */Linux 1948//zz // (__NR_readdir, old_readdir), // 89 -- superseded 1949//zz 1950 PLAX_(__NR_mmap, old_mmap), // 90 1951 GENXY(__NR_munmap, sys_munmap), // 91 1952 GENX_(__NR_truncate, sys_truncate), // 92 1953 GENX_(__NR_ftruncate, sys_ftruncate), // 93 1954 GENX_(__NR_fchmod, sys_fchmod), // 94 1955 1956 LINX_(__NR_fchown, sys_fchown16), // 95 1957 GENX_(__NR_getpriority, sys_getpriority), // 96 1958 GENX_(__NR_setpriority, sys_setpriority), // 97 1959 GENX_(__NR_profil, sys_ni_syscall), // 98 1960 GENXY(__NR_statfs, sys_statfs), // 99 1961 1962 GENXY(__NR_fstatfs, sys_fstatfs), // 100 1963 LINX_(__NR_ioperm, sys_ioperm), // 101 1964 PLAXY(__NR_socketcall, sys_socketcall), // 102 x86/Linux-only 1965 LINXY(__NR_syslog, sys_syslog), // 103 1966 GENXY(__NR_setitimer, sys_setitimer), // 104 1967 1968 GENXY(__NR_getitimer, sys_getitimer), // 105 1969 GENXY(__NR_stat, sys_newstat), // 106 1970 GENXY(__NR_lstat, sys_newlstat), // 107 1971 GENXY(__NR_fstat, sys_newfstat), // 108 1972//zz // (__NR_olduname, sys_uname), // 109 -- obsolete 1973//zz 1974 GENX_(__NR_iopl, sys_iopl), // 110 1975 LINX_(__NR_vhangup, sys_vhangup), // 111 1976 GENX_(__NR_idle, sys_ni_syscall), // 112 1977 PLAXY(__NR_vm86old, sys_vm86old), // 113 x86/Linux-only 1978 GENXY(__NR_wait4, sys_wait4), // 114 1979//zz 1980//zz // (__NR_swapoff, sys_swapoff), // 115 */Linux 1981 LINXY(__NR_sysinfo, sys_sysinfo), // 116 1982 PLAXY(__NR_ipc, sys_ipc), // 117 1983 GENX_(__NR_fsync, sys_fsync), // 118 1984 PLAX_(__NR_sigreturn, sys_sigreturn), // 119 ?/Linux 1985 1986 PLAX_(__NR_clone, sys_clone), // 120 1987//zz // (__NR_setdomainname, sys_setdomainname), // 121 */*(?) 1988 GENXY(__NR_uname, sys_newuname), // 122 1989 PLAX_(__NR_modify_ldt, sys_modify_ldt), // 123 1990//zz LINXY(__NR_adjtimex, sys_adjtimex), // 124 1991//zz 1992 GENXY(__NR_mprotect, sys_mprotect), // 125 1993 LINXY(__NR_sigprocmask, sys_sigprocmask), // 126 1994//zz // Nb: create_module() was removed 2.4-->2.6 1995 GENX_(__NR_create_module, sys_ni_syscall), // 127 1996 GENX_(__NR_init_module, sys_init_module), // 128 1997//zz // (__NR_delete_module, sys_delete_module), // 129 (*/Linux)? 
1998//zz 1999//zz // Nb: get_kernel_syms() was removed 2.4-->2.6 2000 GENX_(__NR_get_kernel_syms, sys_ni_syscall), // 130 2001 LINX_(__NR_quotactl, sys_quotactl), // 131 2002 GENX_(__NR_getpgid, sys_getpgid), // 132 2003 GENX_(__NR_fchdir, sys_fchdir), // 133 2004//zz // (__NR_bdflush, sys_bdflush), // 134 */Linux 2005//zz 2006//zz // (__NR_sysfs, sys_sysfs), // 135 SVr4 2007 LINX_(__NR_personality, sys_personality), // 136 2008 GENX_(__NR_afs_syscall, sys_ni_syscall), // 137 2009 LINX_(__NR_setfsuid, sys_setfsuid16), // 138 2010 LINX_(__NR_setfsgid, sys_setfsgid16), // 139 2011 2012 LINXY(__NR__llseek, sys_llseek), // 140 2013 GENXY(__NR_getdents, sys_getdents), // 141 2014 GENX_(__NR__newselect, sys_select), // 142 2015 GENX_(__NR_flock, sys_flock), // 143 2016 GENX_(__NR_msync, sys_msync), // 144 2017 2018 GENXY(__NR_readv, sys_readv), // 145 2019 GENX_(__NR_writev, sys_writev), // 146 2020 GENX_(__NR_getsid, sys_getsid), // 147 2021 GENX_(__NR_fdatasync, sys_fdatasync), // 148 2022 LINXY(__NR__sysctl, sys_sysctl), // 149 2023 2024 GENX_(__NR_mlock, sys_mlock), // 150 2025 GENX_(__NR_munlock, sys_munlock), // 151 2026 GENX_(__NR_mlockall, sys_mlockall), // 152 2027 LINX_(__NR_munlockall, sys_munlockall), // 153 2028 LINXY(__NR_sched_setparam, sys_sched_setparam), // 154 2029 2030 LINXY(__NR_sched_getparam, sys_sched_getparam), // 155 2031 LINX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 156 2032 LINX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 157 2033 LINX_(__NR_sched_yield, sys_sched_yield), // 158 2034 LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max),// 159 2035 2036 LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min),// 160 2037//zz //LINX?(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161 */* 2038 GENXY(__NR_nanosleep, sys_nanosleep), // 162 2039 GENX_(__NR_mremap, sys_mremap), // 163 2040 LINX_(__NR_setresuid, sys_setresuid16), // 164 2041 2042 LINXY(__NR_getresuid, sys_getresuid16), // 165 2043 PLAXY(__NR_vm86, sys_vm86), // 166 x86/Linux-only 2044 GENX_(__NR_query_module, sys_ni_syscall), // 167 2045 GENXY(__NR_poll, sys_poll), // 168 2046//zz // (__NR_nfsservctl, sys_nfsservctl), // 169 */Linux 2047//zz 2048 LINX_(__NR_setresgid, sys_setresgid16), // 170 2049 LINXY(__NR_getresgid, sys_getresgid16), // 171 2050 LINXY(__NR_prctl, sys_prctl), // 172 2051 PLAX_(__NR_rt_sigreturn, sys_rt_sigreturn), // 173 x86/Linux only? 
2052 LINXY(__NR_rt_sigaction, sys_rt_sigaction), // 174 2053 2054 LINXY(__NR_rt_sigprocmask, sys_rt_sigprocmask), // 175 2055 LINXY(__NR_rt_sigpending, sys_rt_sigpending), // 176 2056 LINXY(__NR_rt_sigtimedwait, sys_rt_sigtimedwait),// 177 2057 LINXY(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo),// 178 2058 LINX_(__NR_rt_sigsuspend, sys_rt_sigsuspend), // 179 2059 2060 GENXY(__NR_pread64, sys_pread64), // 180 2061 GENX_(__NR_pwrite64, sys_pwrite64), // 181 2062 LINX_(__NR_chown, sys_chown16), // 182 2063 GENXY(__NR_getcwd, sys_getcwd), // 183 2064 LINXY(__NR_capget, sys_capget), // 184 2065 2066 LINX_(__NR_capset, sys_capset), // 185 2067 GENXY(__NR_sigaltstack, sys_sigaltstack), // 186 2068 LINXY(__NR_sendfile, sys_sendfile), // 187 2069 GENXY(__NR_getpmsg, sys_getpmsg), // 188 2070 GENX_(__NR_putpmsg, sys_putpmsg), // 189 2071 2072 // Nb: we treat vfork as fork 2073 GENX_(__NR_vfork, sys_fork), // 190 2074 GENXY(__NR_ugetrlimit, sys_getrlimit), // 191 2075 PLAX_(__NR_mmap2, sys_mmap2), // 192 2076 GENX_(__NR_truncate64, sys_truncate64), // 193 2077 GENX_(__NR_ftruncate64, sys_ftruncate64), // 194 2078 2079 PLAXY(__NR_stat64, sys_stat64), // 195 2080 PLAXY(__NR_lstat64, sys_lstat64), // 196 2081 PLAXY(__NR_fstat64, sys_fstat64), // 197 2082 GENX_(__NR_lchown32, sys_lchown), // 198 2083 GENX_(__NR_getuid32, sys_getuid), // 199 2084 2085 GENX_(__NR_getgid32, sys_getgid), // 200 2086 GENX_(__NR_geteuid32, sys_geteuid), // 201 2087 GENX_(__NR_getegid32, sys_getegid), // 202 2088 GENX_(__NR_setreuid32, sys_setreuid), // 203 2089 GENX_(__NR_setregid32, sys_setregid), // 204 2090 2091 GENXY(__NR_getgroups32, sys_getgroups), // 205 2092 GENX_(__NR_setgroups32, sys_setgroups), // 206 2093 GENX_(__NR_fchown32, sys_fchown), // 207 2094 LINX_(__NR_setresuid32, sys_setresuid), // 208 2095 LINXY(__NR_getresuid32, sys_getresuid), // 209 2096 2097 LINX_(__NR_setresgid32, sys_setresgid), // 210 2098 LINXY(__NR_getresgid32, sys_getresgid), // 211 2099 GENX_(__NR_chown32, sys_chown), // 212 2100 GENX_(__NR_setuid32, sys_setuid), // 213 2101 GENX_(__NR_setgid32, sys_setgid), // 214 2102 2103 LINX_(__NR_setfsuid32, sys_setfsuid), // 215 2104 LINX_(__NR_setfsgid32, sys_setfsgid), // 216 2105//zz // (__NR_pivot_root, sys_pivot_root), // 217 */Linux 2106 GENXY(__NR_mincore, sys_mincore), // 218 2107 GENX_(__NR_madvise, sys_madvise), // 219 2108 2109 GENXY(__NR_getdents64, sys_getdents64), // 220 2110 GENXY(__NR_fcntl64, sys_fcntl64), // 221 2111 GENX_(222, sys_ni_syscall), // 222 2112 PLAXY(223, sys_syscall223), // 223 // sys_bproc? 2113 LINX_(__NR_gettid, sys_gettid), // 224 2114 2115//zz // (__NR_readahead, sys_readahead), // 225 */(Linux?) 
2116 LINX_(__NR_setxattr, sys_setxattr), // 226 2117 LINX_(__NR_lsetxattr, sys_lsetxattr), // 227 2118 LINX_(__NR_fsetxattr, sys_fsetxattr), // 228 2119 LINXY(__NR_getxattr, sys_getxattr), // 229 2120 2121 LINXY(__NR_lgetxattr, sys_lgetxattr), // 230 2122 LINXY(__NR_fgetxattr, sys_fgetxattr), // 231 2123 LINXY(__NR_listxattr, sys_listxattr), // 232 2124 LINXY(__NR_llistxattr, sys_llistxattr), // 233 2125 LINXY(__NR_flistxattr, sys_flistxattr), // 234 2126 2127 LINX_(__NR_removexattr, sys_removexattr), // 235 2128 LINX_(__NR_lremovexattr, sys_lremovexattr), // 236 2129 LINX_(__NR_fremovexattr, sys_fremovexattr), // 237 2130 LINXY(__NR_tkill, sys_tkill), // 238 */Linux 2131 LINXY(__NR_sendfile64, sys_sendfile64), // 239 2132 2133 LINXY(__NR_futex, sys_futex), // 240 2134 LINX_(__NR_sched_setaffinity, sys_sched_setaffinity), // 241 2135 LINXY(__NR_sched_getaffinity, sys_sched_getaffinity), // 242 2136 PLAX_(__NR_set_thread_area, sys_set_thread_area), // 243 2137 PLAX_(__NR_get_thread_area, sys_get_thread_area), // 244 2138 2139 LINXY(__NR_io_setup, sys_io_setup), // 245 2140 LINX_(__NR_io_destroy, sys_io_destroy), // 246 2141 LINXY(__NR_io_getevents, sys_io_getevents), // 247 2142 LINX_(__NR_io_submit, sys_io_submit), // 248 2143 LINXY(__NR_io_cancel, sys_io_cancel), // 249 2144 2145 LINX_(__NR_fadvise64, sys_fadvise64), // 250 */(Linux?) 2146 GENX_(251, sys_ni_syscall), // 251 2147 LINX_(__NR_exit_group, sys_exit_group), // 252 2148 GENXY(__NR_lookup_dcookie, sys_lookup_dcookie), // 253 2149 LINXY(__NR_epoll_create, sys_epoll_create), // 254 2150 2151 LINX_(__NR_epoll_ctl, sys_epoll_ctl), // 255 2152 LINXY(__NR_epoll_wait, sys_epoll_wait), // 256 2153//zz // (__NR_remap_file_pages, sys_remap_file_pages), // 257 */Linux 2154 LINX_(__NR_set_tid_address, sys_set_tid_address), // 258 2155 LINXY(__NR_timer_create, sys_timer_create), // 259 2156 2157 LINXY(__NR_timer_settime, sys_timer_settime), // (timer_create+1) 2158 LINXY(__NR_timer_gettime, sys_timer_gettime), // (timer_create+2) 2159 LINX_(__NR_timer_getoverrun, sys_timer_getoverrun),//(timer_create+3) 2160 LINX_(__NR_timer_delete, sys_timer_delete), // (timer_create+4) 2161 LINX_(__NR_clock_settime, sys_clock_settime), // (timer_create+5) 2162 2163 LINXY(__NR_clock_gettime, sys_clock_gettime), // (timer_create+6) 2164 LINXY(__NR_clock_getres, sys_clock_getres), // (timer_create+7) 2165 LINXY(__NR_clock_nanosleep, sys_clock_nanosleep),// (timer_create+8) */* 2166 GENXY(__NR_statfs64, sys_statfs64), // 268 2167 GENXY(__NR_fstatfs64, sys_fstatfs64), // 269 2168 2169 LINX_(__NR_tgkill, sys_tgkill), // 270 */Linux 2170 GENX_(__NR_utimes, sys_utimes), // 271 2171 LINX_(__NR_fadvise64_64, sys_fadvise64_64), // 272 */(Linux?) 2172 GENX_(__NR_vserver, sys_ni_syscall), // 273 2173 LINX_(__NR_mbind, sys_mbind), // 274 ?/? 2174 2175 LINXY(__NR_get_mempolicy, sys_get_mempolicy), // 275 ?/? 2176 LINX_(__NR_set_mempolicy, sys_set_mempolicy), // 276 ?/? 
2177 LINXY(__NR_mq_open, sys_mq_open), // 277 2178 LINX_(__NR_mq_unlink, sys_mq_unlink), // (mq_open+1) 2179 LINX_(__NR_mq_timedsend, sys_mq_timedsend), // (mq_open+2) 2180 2181 LINXY(__NR_mq_timedreceive, sys_mq_timedreceive),// (mq_open+3) 2182 LINX_(__NR_mq_notify, sys_mq_notify), // (mq_open+4) 2183 LINXY(__NR_mq_getsetattr, sys_mq_getsetattr), // (mq_open+5) 2184 GENX_(__NR_sys_kexec_load, sys_ni_syscall), // 283 2185 LINXY(__NR_waitid, sys_waitid), // 284 2186 2187 GENX_(285, sys_ni_syscall), // 285 2188 LINX_(__NR_add_key, sys_add_key), // 286 2189 LINX_(__NR_request_key, sys_request_key), // 287 2190 LINXY(__NR_keyctl, sys_keyctl), // 288 2191// LINX_(__NR_ioprio_set, sys_ioprio_set), // 289 2192 2193// LINX_(__NR_ioprio_get, sys_ioprio_get), // 290 2194 LINX_(__NR_inotify_init, sys_inotify_init), // 291 2195 LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 292 2196 LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 293 2197// LINX_(__NR_migrate_pages, sys_migrate_pages), // 294 2198 2199 LINXY(__NR_openat, sys_openat), // 295 2200 LINX_(__NR_mkdirat, sys_mkdirat), // 296 2201 LINX_(__NR_mknodat, sys_mknodat), // 297 2202 LINX_(__NR_fchownat, sys_fchownat), // 298 2203 LINX_(__NR_futimesat, sys_futimesat), // 299 2204 2205 PLAXY(__NR_fstatat64, sys_fstatat64), // 300 2206 LINX_(__NR_unlinkat, sys_unlinkat), // 301 2207 LINX_(__NR_renameat, sys_renameat), // 302 2208 LINX_(__NR_linkat, sys_linkat), // 303 2209 LINX_(__NR_symlinkat, sys_symlinkat), // 304 2210 2211 LINX_(__NR_readlinkat, sys_readlinkat), // 305 2212 LINX_(__NR_fchmodat, sys_fchmodat), // 306 2213 LINX_(__NR_faccessat, sys_faccessat), // 307 2214 LINX_(__NR_pselect6, sys_pselect6), // 308 2215 LINXY(__NR_ppoll, sys_ppoll), // 309 2216 2217// LINX_(__NR_unshare, sys_unshare), // 310 2218 LINX_(__NR_set_robust_list, sys_set_robust_list), // 311 2219 LINXY(__NR_get_robust_list, sys_get_robust_list), // 312 2220// LINX_(__NR_splice, sys_ni_syscall), // 313 2221 LINX_(__NR_sync_file_range, sys_sync_file_range), // 314 2222 2223// LINX_(__NR_tee, sys_ni_syscall), // 315 2224// LINX_(__NR_vmsplice, sys_ni_syscall), // 316 2225// LINX_(__NR_move_pages, sys_ni_syscall), // 317 2226// LINX_(__NR_getcpu, sys_ni_syscall), // 318 2227 LINXY(__NR_epoll_pwait, sys_epoll_pwait), // 319 2228 2229 LINX_(__NR_utimensat, sys_utimensat), // 320 2230 LINXY(__NR_signalfd, sys_signalfd), // 321 2231 LINXY(__NR_timerfd_create, sys_timerfd_create), // 322 2232 LINX_(__NR_eventfd, sys_eventfd), // 323 2233// LINX_(__NR_fallocate, sys_ni_syscall), // 324 2234 LINXY(__NR_timerfd_settime, sys_timerfd_settime), // 325 2235 LINXY(__NR_timerfd_gettime, sys_timerfd_gettime), // 326 2236}; 2237 2238const UInt ML_(syscall_table_size) = 2239 sizeof(ML_(syscall_table)) / sizeof(ML_(syscall_table)[0]); 2240 2241/*--------------------------------------------------------------------*/ 2242/*--- end ---*/ 2243/*--------------------------------------------------------------------*/ 2244