syswrap-x86-linux.c revision d8feb70c360acdd48bc3b72507f58d74bccf9998
1 2/*--------------------------------------------------------------------*/ 3/*--- Platform-specific syscalls stuff. syswrap-x86-linux.c ---*/ 4/*--------------------------------------------------------------------*/ 5 6/* 7 This file is part of Valgrind, a dynamic binary instrumentation 8 framework. 9 10 Copyright (C) 2000-2009 Nicholas Nethercote 11 njn@valgrind.org 12 13 This program is free software; you can redistribute it and/or 14 modify it under the terms of the GNU General Public License as 15 published by the Free Software Foundation; either version 2 of the 16 License, or (at your option) any later version. 17 18 This program is distributed in the hope that it will be useful, but 19 WITHOUT ANY WARRANTY; without even the implied warranty of 20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 21 General Public License for more details. 22 23 You should have received a copy of the GNU General Public License 24 along with this program; if not, write to the Free Software 25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 26 02111-1307, USA. 27 28 The GNU General Public License is contained in the file COPYING. 29*/ 30 31#if defined(VGP_x86_linux) 32 33/* TODO/FIXME jrs 20050207: assignments to the syscall return result 34 in interrupted_syscall() need to be reviewed. They don't seem 35 to assign the shadow state. 
36*/ 37 38#include "pub_core_basics.h" 39#include "pub_core_vki.h" 40#include "pub_core_vkiscnums.h" 41#include "pub_core_threadstate.h" 42#include "pub_core_aspacemgr.h" 43#include "pub_core_debuglog.h" 44#include "pub_core_libcbase.h" 45#include "pub_core_libcassert.h" 46#include "pub_core_libcprint.h" 47#include "pub_core_libcproc.h" 48#include "pub_core_libcsignal.h" 49#include "pub_core_mallocfree.h" 50#include "pub_core_options.h" 51#include "pub_core_scheduler.h" 52#include "pub_core_sigframe.h" // For VG_(sigframe_destroy)() 53#include "pub_core_signals.h" 54#include "pub_core_syscall.h" 55#include "pub_core_syswrap.h" 56#include "pub_core_tooliface.h" 57#include "pub_core_stacks.h" // VG_(register_stack) 58 59#include "priv_types_n_macros.h" 60#include "priv_syswrap-generic.h" /* for decls of generic wrappers */ 61#include "priv_syswrap-linux.h" /* for decls of linux-ish wrappers */ 62#include "priv_syswrap-linux-variants.h" /* decls of linux variant wrappers */ 63#include "priv_syswrap-main.h" 64 65 66/* --------------------------------------------------------------------- 67 clone() handling 68 ------------------------------------------------------------------ */ 69 70/* Call f(arg1), but first switch stacks, using 'stack' as the new 71 stack, and use 'retaddr' as f's return-to address. 
   Also, clear all
   the integer registers before entering f.*/
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1) ( Addr stack,
                                  Addr retaddr,
                                  void (*f)(Word),
                                  Word arg1 );
/* Stack layout on entry to the asm routine below (args pushed by the
   C caller, so they sit above the return address):
     4(%esp) == stack
     8(%esp) == retaddr
    12(%esp) == f
    16(%esp) == arg1
   NOTE: "vgModuleLocal_call_on_new_stack_0_1" is the expansion of
   ML_(call_on_new_stack_0_1).  The routine never returns: it switches
   %esp to 'stack', arranges the new stack so that 'ret' transfers
   control to f with 'retaddr' as f's return address and arg1 as its
   argument, and zeroes the GP registers first so the new thread does
   not inherit stale values. */
// 4(%esp) == stack
// 8(%esp) == retaddr
// 12(%esp) == f
// 16(%esp) == arg1
asm(
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
" movl %esp, %esi\n" // remember old stack pointer
" movl 4(%esi), %esp\n" // set stack
" pushl 16(%esi)\n" // arg1 to stack
" pushl 8(%esi)\n" // retaddr to stack
" pushl 12(%esi)\n" // f to stack
" movl $0, %eax\n" // zero all GP regs
" movl $0, %ebx\n"
" movl $0, %ecx\n"
" movl $0, %edx\n"
" movl $0, %esi\n"
" movl $0, %edi\n"
" movl $0, %ebp\n"
" ret\n" // jump to f
" ud2\n" // should never get here
".previous\n"
);


/*
   Perform a clone system call.  clone is strange because it has
   fork()-like return-twice semantics, so it needs special
   handling here.

   Upon entry, we have:

      int (fn)(void*)   in 0+FSZ(%esp)
      void* child_stack in 4+FSZ(%esp)
      int flags         in 8+FSZ(%esp)
      void* arg         in 12+FSZ(%esp)
      pid_t* child_tid  in 16+FSZ(%esp)
      pid_t* parent_tid in 20+FSZ(%esp)
      void* tls_ptr     in 24+FSZ(%esp)

   System call requires:

      int $__NR_clone   in %eax
      int flags         in %ebx
      void* child_stack in %ecx
      pid_t* parent_tid in %edx
      pid_t* child_tid  in %edi
      void* tls_ptr     in %esi

   Returns an Int encoded in the linux-x86 way, not a SysRes.
 */
/* FSZ is the distance from %esp to the caller's arguments after the
   three callee-saved register pushes below: retaddr + ebx + edi + esi. */
#define FSZ "4+4+4+4" /* frame size = retaddr+ebx+edi+esi */
#define __NR_CLONE VG_STRINGIFY(__NR_clone)
#define __NR_EXIT VG_STRINGIFY(__NR_exit)

extern
Int do_syscall_clone_x86_linux ( Word (*fn)(void *),
                                 void* stack,
                                 Int flags,
                                 void* arg,
                                 Int* child_tid,
                                 Int* parent_tid,
                                 vki_modify_ldt_t * );
asm(
".text\n"
"do_syscall_clone_x86_linux:\n"
/* Save the callee-saved registers we are about to clobber. */
" push %ebx\n"
" push %edi\n"
" push %esi\n"

 /* set up child stack with function and arg */
" movl 4+"FSZ"(%esp), %ecx\n" /* syscall arg2: child stack */
" movl 12+"FSZ"(%esp), %ebx\n" /* fn arg */
" movl 0+"FSZ"(%esp), %eax\n" /* fn */
" lea -8(%ecx), %ecx\n" /* make space on stack */
" movl %ebx, 4(%ecx)\n" /* fn arg */
" movl %eax, 0(%ecx)\n" /* fn */

 /* get other args to clone */
" movl 8+"FSZ"(%esp), %ebx\n" /* syscall arg1: flags */
" movl 20+"FSZ"(%esp), %edx\n" /* syscall arg3: parent tid * */
" movl 16+"FSZ"(%esp), %edi\n" /* syscall arg5: child tid * */
" movl 24+"FSZ"(%esp), %esi\n" /* syscall arg4: tls_ptr * */
" movl $"__NR_CLONE", %eax\n"
" int $0x80\n" /* clone() */
" testl %eax, %eax\n" /* child if retval == 0 */
" jnz 1f\n"

 /* CHILD - call thread function */
" popl %eax\n"
" call *%eax\n" /* call fn */

 /* exit with result */
" movl %eax, %ebx\n" /* arg1: return value from fn */
" movl $"__NR_EXIT", %eax\n"
" int $0x80\n"

 /* Hm, exit returned */
" ud2\n"

"1:\n" /* PARENT or ERROR */
" pop %esi\n"
" pop %edi\n"
" pop %ebx\n"
" ret\n"
".previous\n"
);

#undef FSZ
#undef __NR_CLONE
#undef __NR_EXIT


// forward declarations
static void setup_child ( ThreadArchState*, ThreadArchState*, Bool );
static SysRes sys_set_thread_area ( ThreadId, vki_modify_ldt_t* );

/*
   When a client clones, we need to keep track of the new thread.  This means:
   1.
      allocate a ThreadId+ThreadState+stack for the thread

   2. initialize the thread's new VCPU state

   3. create the thread using the same args as the client requested,
      but using the scheduler entrypoint for EIP, and a separate stack
      for ESP.
 */
static SysRes do_clone ( ThreadId ptid,
                         UInt flags, Addr esp,
                         Int* parent_tidptr,
                         Int* child_tidptr,
                         vki_modify_ldt_t *tlsinfo)
{
   static const Bool debug = False;

   ThreadId ctid = VG_(alloc_ThreadState)();
   ThreadState* ptst = VG_(get_ThreadState)(ptid);
   ThreadState* ctst = VG_(get_ThreadState)(ctid);
   UWord* stack;
   NSegment const* seg;
   SysRes res;
   Int eax;
   vki_sigset_t blockall, savedmask;

   VG_(sigfillset)(&blockall);

   vg_assert(VG_(is_running_thread)(ptid));
   vg_assert(VG_(is_valid_tid)(ctid));

   /* Stack for Valgrind's own frame for the new thread; the client
      thread runs on the client-supplied 'esp'. */
   stack = (UWord*)ML_(allocstack)(ctid);
   if (stack == NULL) {
      /* NOTE(review): this path reaches 'out:' before the
         pre_thread_ll_create notification below, so tools receive a
         pre_thread_ll_exit with no matching create -- confirm whether
         that is intended. */
      res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
      goto out;
   }

   /* Copy register state

      Both parent and child return to the same place, and the code
      following the clone syscall works out which is which, so we
      don't need to worry about it.

      The parent gets the child's new tid returned from clone, but the
      child gets 0.

      If the clone call specifies a NULL esp for the new thread, then
      it actually gets a copy of the parent's esp.
   */
   /* Note: the clone call done by the Quadrics Elan3 driver specifies
      clone flags of 0xF00, and it seems to rely on the assumption
      that the child inherits a copy of the parent's GDT.
      setup_child takes care of setting that up. */
   setup_child( &ctst->arch, &ptst->arch, True );

   /* Make sys_clone appear to have returned Success(0) in the
      child. */
   ctst->arch.vex.guest_EAX = 0;

   if (esp != 0)
      ctst->arch.vex.guest_ESP = esp;

   ctst->os_state.parent = ptid;

   /* inherit signal mask */
   ctst->sig_mask = ptst->sig_mask;
   ctst->tmp_sig_mask = ptst->sig_mask;

   /* We don't really know where the client stack is, because its
      allocated by the client.  The best we can do is look at the
      memory mappings and try to derive some useful information.  We
      assume that esp starts near its highest possible value, and can
      only go down to the start of the mmaped segment. */
   seg = VG_(am_find_nsegment)((Addr)esp);
   if (seg && seg->kind != SkResvn) {
      ctst->client_stack_highest_word = (Addr)VG_PGROUNDUP(esp);
      ctst->client_stack_szB = ctst->client_stack_highest_word - seg->start;

      VG_(register_stack)(seg->start, ctst->client_stack_highest_word);

      if (debug)
         VG_(printf)("tid %d: guessed client stack range %#lx-%#lx\n",
                     ctid, seg->start, VG_PGROUNDUP(esp));
   } else {
      /* No usable mapping found at esp: warn and record zero size. */
      VG_(message)(Vg_UserMsg,
                   "!? New thread %d starts with ESP(%#lx) unmapped\n",
                   ctid, esp);
      ctst->client_stack_szB = 0;
   }

   /* Assume the clone will succeed, and tell any tool that wants to
      know that this thread has come into existence.  We cannot defer
      it beyond this point because sys_set_thread_area, just below,
      causes tCheck to assert by making references to the new ThreadId
      if we don't state the new thread exists prior to that point.
      If the clone fails, we'll send out a ll_exit notification for it
      at the out: label below, to clean up. */
   VG_TRACK ( pre_thread_ll_create, ptid, ctid );

   if (flags & VKI_CLONE_SETTLS) {
      if (debug)
         VG_(printf)("clone child has SETTLS: tls info at %p: idx=%d "
                     "base=%#lx limit=%x; esp=%#x fs=%x gs=%x\n",
                     tlsinfo, tlsinfo->entry_number,
                     tlsinfo->base_addr, tlsinfo->limit,
                     ptst->arch.vex.guest_ESP,
                     ctst->arch.vex.guest_FS, ctst->arch.vex.guest_GS);
      /* Install the child's TLS descriptor in its (simulated) GDT
         before it starts running. */
      res = sys_set_thread_area(ctid, tlsinfo);
      if (sr_isError(res))
         goto out;
   }

   /* TLS has already been handled above; don't let the kernel see the
      flag as well. */
   flags &= ~VKI_CLONE_SETTLS;

   /* start the thread with everything blocked */
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);

   /* Create the new thread */
   eax = do_syscall_clone_x86_linux(
            ML_(start_thread_NORETURN), stack, flags, &VG_(threads)[ctid],
            child_tidptr, parent_tidptr, NULL
         );
   res = VG_(mk_SysRes_x86_linux)( eax );

   VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);

  out:
   if (sr_isError(res)) {
      /* clone failed */
      VG_(cleanup_thread)(&ctst->arch);
      ctst->status = VgTs_Empty;
      /* oops.  Better tell the tool the thread exited in a hurry :-) */
      VG_TRACK( pre_thread_ll_exit, ctid );
   }

   return res;
}


/* ---------------------------------------------------------------------
   LDT/GDT simulation
   ------------------------------------------------------------------ */

/* Details of the LDT simulation
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   When a program runs natively, the linux kernel allows each *thread*
   in it to have its own LDT.  Almost all programs never do this --
   it's wildly unportable, after all -- and so the kernel never
   allocates the structure, which is just as well as an LDT occupies
   64k of memory (8192 entries of size 8 bytes).

   A thread may choose to modify its LDT entries, by doing the
   __NR_modify_ldt syscall.  In such a situation the kernel will then
   allocate an LDT structure for it.
Each LDT entry is basically a 352 (base, limit) pair. A virtual address in a specific segment is 353 translated to a linear address by adding the segment's base value. 354 In addition, the virtual address must not exceed the limit value. 355 356 To use an LDT entry, a thread loads one of the segment registers 357 (%cs, %ss, %ds, %es, %fs, %gs) with the index of the LDT entry (0 358 .. 8191) it wants to use. In fact, the required value is (index << 359 3) + 7, but that's not important right now. Any normal instruction 360 which includes an addressing mode can then be made relative to that 361 LDT entry by prefixing the insn with a so-called segment-override 362 prefix, a byte which indicates which of the 6 segment registers 363 holds the LDT index. 364 365 Now, a key constraint is that valgrind's address checks operate in 366 terms of linear addresses. So we have to explicitly translate 367 virtual addrs into linear addrs, and that means doing a complete 368 LDT simulation. 369 370 Calls to modify_ldt are intercepted. For each thread, we maintain 371 an LDT (with the same normally-never-allocated optimisation that 372 the kernel does). This is updated as expected via calls to 373 modify_ldt. 374 375 When a thread does an amode calculation involving a segment 376 override prefix, the relevant LDT entry for the thread is 377 consulted. It all works. 378 379 There is a conceptual problem, which appears when switching back to 380 native execution, either temporarily to pass syscalls to the 381 kernel, or permanently, when debugging V. Problem at such points 382 is that it's pretty pointless to copy the simulated machine's 383 segment registers to the real machine, because we'd also need to 384 copy the simulated LDT into the real one, and that's prohibitively 385 expensive. 386 387 Fortunately it looks like no syscalls rely on the segment regs or 388 LDT being correct, so we can get away with it. Apart from that the 389 simulation is pretty straightforward. 
All 6 segment registers are 390 tracked, although only %ds, %es, %fs and %gs are allowed as 391 prefixes. Perhaps it could be restricted even more than that -- I 392 am not sure what is and isn't allowed in user-mode. 393*/ 394 395/* Translate a struct modify_ldt_ldt_s to a VexGuestX86SegDescr, using 396 the Linux kernel's logic (cut-n-paste of code in 397 linux/kernel/ldt.c). */ 398 399static 400void translate_to_hw_format ( /* IN */ vki_modify_ldt_t* inn, 401 /* OUT */ VexGuestX86SegDescr* out, 402 Int oldmode ) 403{ 404 UInt entry_1, entry_2; 405 vg_assert(8 == sizeof(VexGuestX86SegDescr)); 406 407 if (0) 408 VG_(printf)("translate_to_hw_format: base %#lx, limit %d\n", 409 inn->base_addr, inn->limit ); 410 411 /* Allow LDTs to be cleared by the user. */ 412 if (inn->base_addr == 0 && inn->limit == 0) { 413 if (oldmode || 414 (inn->contents == 0 && 415 inn->read_exec_only == 1 && 416 inn->seg_32bit == 0 && 417 inn->limit_in_pages == 0 && 418 inn->seg_not_present == 1 && 419 inn->useable == 0 )) { 420 entry_1 = 0; 421 entry_2 = 0; 422 goto install; 423 } 424 } 425 426 entry_1 = ((inn->base_addr & 0x0000ffff) << 16) | 427 (inn->limit & 0x0ffff); 428 entry_2 = (inn->base_addr & 0xff000000) | 429 ((inn->base_addr & 0x00ff0000) >> 16) | 430 (inn->limit & 0xf0000) | 431 ((inn->read_exec_only ^ 1) << 9) | 432 (inn->contents << 10) | 433 ((inn->seg_not_present ^ 1) << 15) | 434 (inn->seg_32bit << 22) | 435 (inn->limit_in_pages << 23) | 436 0x7000; 437 if (!oldmode) 438 entry_2 |= (inn->useable << 20); 439 440 /* Install the new entry ... */ 441 install: 442 out->LdtEnt.Words.word1 = entry_1; 443 out->LdtEnt.Words.word2 = entry_2; 444} 445 446/* Create a zeroed-out GDT. */ 447static VexGuestX86SegDescr* alloc_zeroed_x86_GDT ( void ) 448{ 449 Int nbytes = VEX_GUEST_X86_GDT_NENT * sizeof(VexGuestX86SegDescr); 450 return VG_(arena_calloc)(VG_AR_CORE, "di.syswrap-x86.azxG.1", nbytes, 1); 451} 452 453/* Create a zeroed-out LDT. 
*/ 454static VexGuestX86SegDescr* alloc_zeroed_x86_LDT ( void ) 455{ 456 Int nbytes = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr); 457 return VG_(arena_calloc)(VG_AR_CORE, "di.syswrap-x86.azxL.1", nbytes, 1); 458} 459 460/* Free up an LDT or GDT allocated by the above fns. */ 461static void free_LDT_or_GDT ( VexGuestX86SegDescr* dt ) 462{ 463 vg_assert(dt); 464 VG_(arena_free)(VG_AR_CORE, (void*)dt); 465} 466 467/* Copy contents between two existing LDTs. */ 468static void copy_LDT_from_to ( VexGuestX86SegDescr* src, 469 VexGuestX86SegDescr* dst ) 470{ 471 Int i; 472 vg_assert(src); 473 vg_assert(dst); 474 for (i = 0; i < VEX_GUEST_X86_LDT_NENT; i++) 475 dst[i] = src[i]; 476} 477 478/* Copy contents between two existing GDTs. */ 479static void copy_GDT_from_to ( VexGuestX86SegDescr* src, 480 VexGuestX86SegDescr* dst ) 481{ 482 Int i; 483 vg_assert(src); 484 vg_assert(dst); 485 for (i = 0; i < VEX_GUEST_X86_GDT_NENT; i++) 486 dst[i] = src[i]; 487} 488 489/* Free this thread's DTs, if it has any. */ 490static void deallocate_LGDTs_for_thread ( VexGuestX86State* vex ) 491{ 492 vg_assert(sizeof(HWord) == sizeof(void*)); 493 494 if (0) 495 VG_(printf)("deallocate_LGDTs_for_thread: " 496 "ldt = 0x%lx, gdt = 0x%lx\n", 497 vex->guest_LDT, vex->guest_GDT ); 498 499 if (vex->guest_LDT != (HWord)NULL) { 500 free_LDT_or_GDT( (VexGuestX86SegDescr*)vex->guest_LDT ); 501 vex->guest_LDT = (HWord)NULL; 502 } 503 504 if (vex->guest_GDT != (HWord)NULL) { 505 free_LDT_or_GDT( (VexGuestX86SegDescr*)vex->guest_GDT ); 506 vex->guest_GDT = (HWord)NULL; 507 } 508} 509 510 511/* 512 * linux/kernel/ldt.c 513 * 514 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds 515 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 516 */ 517 518/* 519 * read_ldt() is not really atomic - this is not a problem since 520 * synchronization of reads and writes done to the LDT has to be 521 * assured by user-space anyway. 
Writes are atomic, to protect 522 * the security checks done on new descriptors. 523 */ 524static 525SysRes read_ldt ( ThreadId tid, UChar* ptr, UInt bytecount ) 526{ 527 SysRes res; 528 UInt i, size; 529 UChar* ldt; 530 531 if (0) 532 VG_(printf)("read_ldt: tid = %d, ptr = %p, bytecount = %d\n", 533 tid, ptr, bytecount ); 534 535 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*)); 536 vg_assert(8 == sizeof(VexGuestX86SegDescr)); 537 538 ldt = (Char*)(VG_(threads)[tid].arch.vex.guest_LDT); 539 res = VG_(mk_SysRes_Success)( 0 ); 540 if (ldt == NULL) 541 /* LDT not allocated, meaning all entries are null */ 542 goto out; 543 544 size = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr); 545 if (size > bytecount) 546 size = bytecount; 547 548 res = VG_(mk_SysRes_Success)( size ); 549 for (i = 0; i < size; i++) 550 ptr[i] = ldt[i]; 551 552 out: 553 return res; 554} 555 556 557static 558SysRes write_ldt ( ThreadId tid, void* ptr, UInt bytecount, Int oldmode ) 559{ 560 SysRes res; 561 VexGuestX86SegDescr* ldt; 562 vki_modify_ldt_t* ldt_info; 563 564 if (0) 565 VG_(printf)("write_ldt: tid = %d, ptr = %p, " 566 "bytecount = %d, oldmode = %d\n", 567 tid, ptr, bytecount, oldmode ); 568 569 vg_assert(8 == sizeof(VexGuestX86SegDescr)); 570 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*)); 571 572 ldt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_LDT; 573 ldt_info = (vki_modify_ldt_t*)ptr; 574 575 res = VG_(mk_SysRes_Error)( VKI_EINVAL ); 576 if (bytecount != sizeof(vki_modify_ldt_t)) 577 goto out; 578 579 res = VG_(mk_SysRes_Error)( VKI_EINVAL ); 580 if (ldt_info->entry_number >= VEX_GUEST_X86_LDT_NENT) 581 goto out; 582 if (ldt_info->contents == 3) { 583 if (oldmode) 584 goto out; 585 if (ldt_info->seg_not_present == 0) 586 goto out; 587 } 588 589 /* If this thread doesn't have an LDT, we'd better allocate it 590 now. 
*/ 591 if (ldt == NULL) { 592 ldt = alloc_zeroed_x86_LDT(); 593 VG_(threads)[tid].arch.vex.guest_LDT = (HWord)ldt; 594 } 595 596 /* Install the new entry ... */ 597 translate_to_hw_format ( ldt_info, &ldt[ldt_info->entry_number], oldmode ); 598 res = VG_(mk_SysRes_Success)( 0 ); 599 600 out: 601 return res; 602} 603 604 605static SysRes sys_modify_ldt ( ThreadId tid, 606 Int func, void* ptr, UInt bytecount ) 607{ 608 SysRes ret = VG_(mk_SysRes_Error)( VKI_ENOSYS ); 609 610 switch (func) { 611 case 0: 612 ret = read_ldt(tid, ptr, bytecount); 613 break; 614 case 1: 615 ret = write_ldt(tid, ptr, bytecount, 1); 616 break; 617 case 2: 618 VG_(unimplemented)("sys_modify_ldt: func == 2"); 619 /* god knows what this is about */ 620 /* ret = read_default_ldt(ptr, bytecount); */ 621 /*UNREACHED*/ 622 break; 623 case 0x11: 624 ret = write_ldt(tid, ptr, bytecount, 0); 625 break; 626 } 627 return ret; 628} 629 630 631static SysRes sys_set_thread_area ( ThreadId tid, vki_modify_ldt_t* info ) 632{ 633 Int idx; 634 VexGuestX86SegDescr* gdt; 635 636 vg_assert(8 == sizeof(VexGuestX86SegDescr)); 637 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*)); 638 639 if (info == NULL) 640 return VG_(mk_SysRes_Error)( VKI_EFAULT ); 641 642 gdt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_GDT; 643 644 /* If the thread doesn't have a GDT, allocate it now. */ 645 if (!gdt) { 646 gdt = alloc_zeroed_x86_GDT(); 647 VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt; 648 } 649 650 idx = info->entry_number; 651 652 if (idx == -1) { 653 /* Find and use the first free entry. Don't allocate entry 654 zero, because the hardware will never do that, and apparently 655 doing so confuses some code (perhaps stuff running on 656 Wine). 
*/ 657 for (idx = 1; idx < VEX_GUEST_X86_GDT_NENT; idx++) { 658 if (gdt[idx].LdtEnt.Words.word1 == 0 659 && gdt[idx].LdtEnt.Words.word2 == 0) 660 break; 661 } 662 663 if (idx == VEX_GUEST_X86_GDT_NENT) 664 return VG_(mk_SysRes_Error)( VKI_ESRCH ); 665 } else if (idx < 0 || idx == 0 || idx >= VEX_GUEST_X86_GDT_NENT) { 666 /* Similarly, reject attempts to use GDT[0]. */ 667 return VG_(mk_SysRes_Error)( VKI_EINVAL ); 668 } 669 670 translate_to_hw_format(info, &gdt[idx], 0); 671 672 VG_TRACK( pre_mem_write, Vg_CoreSysCall, tid, 673 "set_thread_area(info->entry)", 674 (Addr) & info->entry_number, sizeof(unsigned int) ); 675 info->entry_number = idx; 676 VG_TRACK( post_mem_write, Vg_CoreSysCall, tid, 677 (Addr) & info->entry_number, sizeof(unsigned int) ); 678 679 return VG_(mk_SysRes_Success)( 0 ); 680} 681 682 683static SysRes sys_get_thread_area ( ThreadId tid, vki_modify_ldt_t* info ) 684{ 685 Int idx; 686 VexGuestX86SegDescr* gdt; 687 688 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*)); 689 vg_assert(8 == sizeof(VexGuestX86SegDescr)); 690 691 if (info == NULL) 692 return VG_(mk_SysRes_Error)( VKI_EFAULT ); 693 694 idx = info->entry_number; 695 696 if (idx < 0 || idx >= VEX_GUEST_X86_GDT_NENT) 697 return VG_(mk_SysRes_Error)( VKI_EINVAL ); 698 699 gdt = (VexGuestX86SegDescr*)VG_(threads)[tid].arch.vex.guest_GDT; 700 701 /* If the thread doesn't have a GDT, allocate it now. 
*/ 702 if (!gdt) { 703 gdt = alloc_zeroed_x86_GDT(); 704 VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt; 705 } 706 707 info->base_addr = ( gdt[idx].LdtEnt.Bits.BaseHi << 24 ) | 708 ( gdt[idx].LdtEnt.Bits.BaseMid << 16 ) | 709 gdt[idx].LdtEnt.Bits.BaseLow; 710 info->limit = ( gdt[idx].LdtEnt.Bits.LimitHi << 16 ) | 711 gdt[idx].LdtEnt.Bits.LimitLow; 712 info->seg_32bit = gdt[idx].LdtEnt.Bits.Default_Big; 713 info->contents = ( gdt[idx].LdtEnt.Bits.Type >> 2 ) & 0x3; 714 info->read_exec_only = ( gdt[idx].LdtEnt.Bits.Type & 0x1 ) ^ 0x1; 715 info->limit_in_pages = gdt[idx].LdtEnt.Bits.Granularity; 716 info->seg_not_present = gdt[idx].LdtEnt.Bits.Pres ^ 0x1; 717 info->useable = gdt[idx].LdtEnt.Bits.Sys; 718 info->reserved = 0; 719 720 return VG_(mk_SysRes_Success)( 0 ); 721} 722 723/* --------------------------------------------------------------------- 724 More thread stuff 725 ------------------------------------------------------------------ */ 726 727void VG_(cleanup_thread) ( ThreadArchState* arch ) 728{ 729 /* Release arch-specific resources held by this thread. */ 730 /* On x86, we have to dump the LDT and GDT. */ 731 deallocate_LGDTs_for_thread( &arch->vex ); 732} 733 734 735static void setup_child ( /*OUT*/ ThreadArchState *child, 736 /*IN*/ ThreadArchState *parent, 737 Bool inherit_parents_GDT ) 738{ 739 /* We inherit our parent's guest state. */ 740 child->vex = parent->vex; 741 child->vex_shadow1 = parent->vex_shadow1; 742 child->vex_shadow2 = parent->vex_shadow2; 743 744 /* We inherit our parent's LDT. */ 745 if (parent->vex.guest_LDT == (HWord)NULL) { 746 /* We hope this is the common case. */ 747 child->vex.guest_LDT = (HWord)NULL; 748 } else { 749 /* No luck .. we have to take a copy of the parent's. 
*/ 750 child->vex.guest_LDT = (HWord)alloc_zeroed_x86_LDT(); 751 copy_LDT_from_to( (VexGuestX86SegDescr*)parent->vex.guest_LDT, 752 (VexGuestX86SegDescr*)child->vex.guest_LDT ); 753 } 754 755 /* Either we start with an empty GDT (the usual case) or inherit a 756 copy of our parents' one (Quadrics Elan3 driver -style clone 757 only). */ 758 child->vex.guest_GDT = (HWord)NULL; 759 760 if (inherit_parents_GDT && parent->vex.guest_GDT != (HWord)NULL) { 761 child->vex.guest_GDT = (HWord)alloc_zeroed_x86_GDT(); 762 copy_GDT_from_to( (VexGuestX86SegDescr*)parent->vex.guest_GDT, 763 (VexGuestX86SegDescr*)child->vex.guest_GDT ); 764 } 765} 766 767 768/* --------------------------------------------------------------------- 769 PRE/POST wrappers for x86/Linux-specific syscalls 770 ------------------------------------------------------------------ */ 771 772#define PRE(name) DEFN_PRE_TEMPLATE(x86_linux, name) 773#define POST(name) DEFN_POST_TEMPLATE(x86_linux, name) 774 775/* Add prototypes for the wrappers declared here, so that gcc doesn't 776 harass us for not having prototypes. Really this is a kludge -- 777 the right thing to do is to make these wrappers 'static' since they 778 aren't visible outside this file, but that requires even more macro 779 magic. 
 */
DECL_TEMPLATE(x86_linux, sys_socketcall);
DECL_TEMPLATE(x86_linux, sys_stat64);
DECL_TEMPLATE(x86_linux, sys_fstatat64);
DECL_TEMPLATE(x86_linux, sys_fstat64);
DECL_TEMPLATE(x86_linux, sys_lstat64);
DECL_TEMPLATE(x86_linux, sys_clone);
DECL_TEMPLATE(x86_linux, old_mmap);
DECL_TEMPLATE(x86_linux, sys_mmap2);
DECL_TEMPLATE(x86_linux, sys_sigreturn);
DECL_TEMPLATE(x86_linux, sys_ipc);
DECL_TEMPLATE(x86_linux, sys_rt_sigreturn);
DECL_TEMPLATE(x86_linux, sys_modify_ldt);
DECL_TEMPLATE(x86_linux, sys_set_thread_area);
DECL_TEMPLATE(x86_linux, sys_get_thread_area);
DECL_TEMPLATE(x86_linux, sys_ptrace);
DECL_TEMPLATE(x86_linux, sys_sigaction);
DECL_TEMPLATE(x86_linux, sys_sigsuspend);
DECL_TEMPLATE(x86_linux, old_select);
DECL_TEMPLATE(x86_linux, sys_vm86old);
DECL_TEMPLATE(x86_linux, sys_vm86);
DECL_TEMPLATE(x86_linux, sys_syscall223);

/* Wrapper for the ancient single-argument select, whose five real
   arguments arrive packed in a struct pointed to by ARG1. */
PRE(old_select)
{
   /* struct sel_arg_struct {
        unsigned long n;
        fd_set *inp, *outp, *exp;
        struct timeval *tvp;
      };
   */
   PRE_REG_READ1(long, "old_select", struct sel_arg_struct *, args);
   PRE_MEM_READ( "old_select(args)", ARG1, 5*sizeof(UWord) );
   *flags |= SfMayBlock;
   {
      /* Unpack the five words so each pointed-to area can be checked
         individually. */
      UInt* arg_struct = (UInt*)ARG1;
      UInt a1, a2, a3, a4, a5;

      a1 = arg_struct[0];
      a2 = arg_struct[1];
      a3 = arg_struct[2];
      a4 = arg_struct[3];
      a5 = arg_struct[4];

      PRINT("old_select ( %d, %#x, %#x, %#x, %#x )", a1,a2,a3,a4,a5);
      if (a2 != (Addr)NULL)
         PRE_MEM_READ( "old_select(readfds)", a2, a1/8 /* __FD_SETSIZE/8 */ );
      if (a3 != (Addr)NULL)
         PRE_MEM_READ( "old_select(writefds)", a3, a1/8 /* __FD_SETSIZE/8 */ );
      if (a4 != (Addr)NULL)
         PRE_MEM_READ( "old_select(exceptfds)", a4, a1/8 /* __FD_SETSIZE/8 */ );
      if (a5 != (Addr)NULL)
         PRE_MEM_READ( "old_select(timeout)", a5, sizeof(struct vki_timeval) );
   }
}

/* Wrapper for clone: validates the optional tid/tls pointer arguments
   according to the flags, then routes thread-creation clones to
   do_clone and fork/vfork-style clones to ML_(do_fork_clone).
   Anything else is rejected with a diagnostic. */
PRE(sys_clone)
{
   UInt cloneflags;
   Bool badarg = False;

   PRINT("sys_clone ( %lx, %#lx, %#lx, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4,ARG5);
   PRE_REG_READ2(int, "clone",
                 unsigned long, flags,
                 void *, child_stack);

   /* Each optional argument is only read by the kernel if the
      corresponding flag is set, so only check it then. */
   if (ARG1 & VKI_CLONE_PARENT_SETTID) {
      if (VG_(tdict).track_pre_reg_read) {
         PRA3("clone", int *, parent_tidptr);
      }
      PRE_MEM_WRITE("clone(parent_tidptr)", ARG3, sizeof(Int));
      if (!VG_(am_is_valid_for_client)(ARG3, sizeof(Int),
                                       VKI_PROT_WRITE)) {
         badarg = True;
      }
   }
   if (ARG1 & VKI_CLONE_SETTLS) {
      if (VG_(tdict).track_pre_reg_read) {
         PRA4("clone", vki_modify_ldt_t *, tlsinfo);
      }
      PRE_MEM_READ("clone(tlsinfo)", ARG4, sizeof(vki_modify_ldt_t));
      if (!VG_(am_is_valid_for_client)(ARG4, sizeof(vki_modify_ldt_t),
                                       VKI_PROT_READ)) {
         badarg = True;
      }
   }
   if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID)) {
      if (VG_(tdict).track_pre_reg_read) {
         PRA5("clone", int *, child_tidptr);
      }
      PRE_MEM_WRITE("clone(child_tidptr)", ARG5, sizeof(Int));
      if (!VG_(am_is_valid_for_client)(ARG5, sizeof(Int),
                                       VKI_PROT_WRITE)) {
         badarg = True;
      }
   }

   if (badarg) {
      SET_STATUS_Failure( VKI_EFAULT );
      return;
   }

   cloneflags = ARG1;

   if (!ML_(client_signal_OK)(ARG1 & VKI_CSIGNAL)) {
      SET_STATUS_Failure( VKI_EINVAL );
      return;
   }

   /* Be ultra-paranoid and filter out any clone-variants we don't understand:
      - ??? specifies clone flags of 0x100011
      - ??? specifies clone flags of 0x1200011.
      - NPTL specifies clone flags of 0x7D0F00.
      - The Quadrics Elan3 driver specifies clone flags of 0xF00.
      - Newer Quadrics Elan3 drivers with NTPL support specify 0x410F00.
      Everything else is rejected.
   */
   /* NOTE: the leading "1 ||" deliberately disables this whitelist
      (see the dated comment below); the switch that follows is the
      effective filter. */
   if (
        1 ||
        /* 11 Nov 05: for the time being, disable this ultra-paranoia.
           The switch below probably does a good enough job. */
        (cloneflags == 0x100011 || cloneflags == 0x1200011
         || cloneflags == 0x7D0F00
         || cloneflags == 0x790F00
         || cloneflags == 0x3D0F00
         || cloneflags == 0x410F00
         || cloneflags == 0xF00
         || cloneflags == 0xF21)) {
      /* OK */
   }
   else {
      /* Nah.  We don't like it.  Go away. */
      goto reject;
   }

   /* Only look at the flags we really care about */
   switch (cloneflags & (VKI_CLONE_VM | VKI_CLONE_FS
                         | VKI_CLONE_FILES | VKI_CLONE_VFORK)) {
   case VKI_CLONE_VM | VKI_CLONE_FS | VKI_CLONE_FILES:
      /* thread creation */
      SET_STATUS_from_SysRes(
         do_clone(tid,
                  ARG1, /* flags */
                  (Addr)ARG2, /* child ESP */
                  (Int *)ARG3, /* parent_tidptr */
                  (Int *)ARG5, /* child_tidptr */
                  (vki_modify_ldt_t *)ARG4)); /* set_tls */
      break;

   case VKI_CLONE_VFORK | VKI_CLONE_VM: /* vfork */
      /* FALLTHROUGH - assume vfork == fork */
      cloneflags &= ~(VKI_CLONE_VFORK | VKI_CLONE_VM);

   case 0: /* plain fork */
      SET_STATUS_from_SysRes(
         ML_(do_fork_clone)(tid,
                            cloneflags, /* flags */
                            (Int *)ARG3, /* parent_tidptr */
                            (Int *)ARG5)); /* child_tidptr */
      break;

   default:
   reject:
      /* should we just ENOSYS? */
      VG_(message)(Vg_UserMsg, "\n");
      VG_(message)(Vg_UserMsg, "Unsupported clone() flags: 0x%lx\n", ARG1);
      VG_(message)(Vg_UserMsg, "\n");
      VG_(message)(Vg_UserMsg, "The only supported clone() uses are:\n");
      VG_(message)(Vg_UserMsg, " - via a threads library (LinuxThreads or NPTL)\n");
      VG_(message)(Vg_UserMsg, " - via the implementation of fork or vfork\n");
      VG_(message)(Vg_UserMsg, " - for the Quadrics Elan3 user-space driver\n");
      VG_(unimplemented)
         ("Valgrind does not support general clone().");
   }

   if (SUCCESS) {
      /* Mark the tid outputs as written only if the clone really
         happened. */
      if (ARG1 & VKI_CLONE_PARENT_SETTID)
         POST_MEM_WRITE(ARG3, sizeof(Int));
      if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID))
         POST_MEM_WRITE(ARG5, sizeof(Int));

      /* Thread creation was successful; let the child have the chance
         to run */
      *flags |= SfYieldAfter;
   }
}

PRE(sys_sigreturn)
{
   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */

   ThreadState* tst;
   PRINT("sys_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust esp to point to start of frame; skip back up over
      sigreturn sequence's "popl %eax" and handler ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_ESP -= sizeof(Addr)+sizeof(Word);
   /* XXX why does ESP change differ from rt_sigreturn case below? */

   /* This is only so that the EIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it */
   VG_(sigframe_destroy)(tid, False);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

PRE(sys_rt_sigreturn)
{
   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
      an explanation of what follows. */

   ThreadState* tst;
   PRINT("sys_rt_sigreturn ( )");

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   /* Adjust esp to point to start of frame; skip back up over handler
      ret addr */
   tst = VG_(get_ThreadState)(tid);
   tst->arch.vex.guest_ESP -= sizeof(Addr);
   /* XXX why does ESP change differ from sigreturn case above? */

   /* This is only so that the EIP is (might be) useful to report if
      something goes wrong in the sigreturn */
   ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);

   /* Restore register state from frame and remove it */
   VG_(sigframe_destroy)(tid, True);

   /* Tell the driver not to update the guest state with the "result",
      and set a bogus result to keep it happy. */
   *flags |= SfNoWriteResult;
   SET_STATUS_Success(0);

   /* Check to see if any signals arose as a result of this. */
   *flags |= SfPollAfter;
}

/* Wrapper for modify_ldt: fully simulated, never passed to the
   kernel. */
PRE(sys_modify_ldt)
{
   PRINT("sys_modify_ldt ( %ld, %#lx, %ld )", ARG1,ARG2,ARG3);
   PRE_REG_READ3(int, "modify_ldt", int, func, void *, ptr,
                 unsigned long, bytecount);

   if (ARG1 == 0) {
      /* read the LDT into ptr */
      PRE_MEM_WRITE( "modify_ldt(ptr)", ARG2, ARG3 );
   }
   if (ARG1 == 1 || ARG1 == 0x11) {
      /* write the LDT with the entry pointed at by ptr */
      PRE_MEM_READ( "modify_ldt(ptr)", ARG2, sizeof(vki_modify_ldt_t) );
   }
   /* "do" the syscall ourselves; the kernel never sees it */
   SET_STATUS_from_SysRes( sys_modify_ldt( tid, ARG1, (void*)ARG2, ARG3 ) );

   if (ARG1 == 0 && SUCCESS && RES > 0) {
      /* RES is the number of LDT bytes actually copied to ptr. */
      POST_MEM_WRITE( ARG2, RES );
   }
}

/* Wrapper for set_thread_area: fully simulated, never passed to the
   kernel. */
PRE(sys_set_thread_area)
{
   PRINT("sys_set_thread_area ( %#lx )", ARG1);
   PRE_REG_READ1(int, "set_thread_area", struct user_desc *, u_info)
   PRE_MEM_READ( "set_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );

   /* "do" the syscall ourselves; the kernel never sees it */
   SET_STATUS_from_SysRes( sys_set_thread_area( tid, (void *)ARG1 ) );
}

/* Wrapper for get_thread_area: fully simulated, never passed to the
   kernel. */
PRE(sys_get_thread_area)
{
   PRINT("sys_get_thread_area ( %#lx )", ARG1);
   PRE_REG_READ1(int, "get_thread_area", struct user_desc *, u_info)
   PRE_MEM_WRITE( "get_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );

   /* "do" the syscall ourselves; the kernel never sees it */
   SET_STATUS_from_SysRes( sys_get_thread_area( tid, (void *)ARG1 ) );

   if (SUCCESS) {
      POST_MEM_WRITE( ARG1, sizeof(vki_modify_ldt_t) );
   }
}

// Parts of this are x86-specific, but the *PEEK* cases are generic.
//
// ARG3 is only used for pointers into the traced process's address
// space and for offsets into the traced process's struct
// user_regs_struct. It is never a pointer into this process's memory
// space, and we should therefore not check anything it points to.
1086PRE(sys_ptrace) 1087{ 1088 PRINT("sys_ptrace ( %ld, %ld, %#lx, %#lx )", ARG1,ARG2,ARG3,ARG4); 1089 PRE_REG_READ4(int, "ptrace", 1090 long, request, long, pid, long, addr, long, data); 1091 switch (ARG1) { 1092 case VKI_PTRACE_PEEKTEXT: 1093 case VKI_PTRACE_PEEKDATA: 1094 case VKI_PTRACE_PEEKUSR: 1095 PRE_MEM_WRITE( "ptrace(peek)", ARG4, 1096 sizeof (long)); 1097 break; 1098 case VKI_PTRACE_GETREGS: 1099 PRE_MEM_WRITE( "ptrace(getregs)", ARG4, 1100 sizeof (struct vki_user_regs_struct)); 1101 break; 1102 case VKI_PTRACE_GETFPREGS: 1103 PRE_MEM_WRITE( "ptrace(getfpregs)", ARG4, 1104 sizeof (struct vki_user_i387_struct)); 1105 break; 1106 case VKI_PTRACE_GETFPXREGS: 1107 PRE_MEM_WRITE( "ptrace(getfpxregs)", ARG4, 1108 sizeof(struct vki_user_fxsr_struct) ); 1109 break; 1110 case VKI_PTRACE_SETREGS: 1111 PRE_MEM_READ( "ptrace(setregs)", ARG4, 1112 sizeof (struct vki_user_regs_struct)); 1113 break; 1114 case VKI_PTRACE_SETFPREGS: 1115 PRE_MEM_READ( "ptrace(setfpregs)", ARG4, 1116 sizeof (struct vki_user_i387_struct)); 1117 break; 1118 case VKI_PTRACE_SETFPXREGS: 1119 PRE_MEM_READ( "ptrace(setfpxregs)", ARG4, 1120 sizeof(struct vki_user_fxsr_struct) ); 1121 break; 1122 case VKI_PTRACE_GETEVENTMSG: 1123 PRE_MEM_WRITE( "ptrace(geteventmsg)", ARG4, sizeof(unsigned long)); 1124 break; 1125 case VKI_PTRACE_GETSIGINFO: 1126 PRE_MEM_WRITE( "ptrace(getsiginfo)", ARG4, sizeof(vki_siginfo_t)); 1127 break; 1128 case VKI_PTRACE_SETSIGINFO: 1129 PRE_MEM_READ( "ptrace(setsiginfo)", ARG4, sizeof(vki_siginfo_t)); 1130 break; 1131 default: 1132 break; 1133 } 1134} 1135 1136POST(sys_ptrace) 1137{ 1138 switch (ARG1) { 1139 case VKI_PTRACE_PEEKTEXT: 1140 case VKI_PTRACE_PEEKDATA: 1141 case VKI_PTRACE_PEEKUSR: 1142 POST_MEM_WRITE( ARG4, sizeof (long)); 1143 break; 1144 case VKI_PTRACE_GETREGS: 1145 POST_MEM_WRITE( ARG4, sizeof (struct vki_user_regs_struct)); 1146 break; 1147 case VKI_PTRACE_GETFPREGS: 1148 POST_MEM_WRITE( ARG4, sizeof (struct vki_user_i387_struct)); 1149 break; 1150 case 
VKI_PTRACE_GETFPXREGS: 1151 POST_MEM_WRITE( ARG4, sizeof(struct vki_user_fxsr_struct) ); 1152 break; 1153 case VKI_PTRACE_GETEVENTMSG: 1154 POST_MEM_WRITE( ARG4, sizeof(unsigned long)); 1155 break; 1156 case VKI_PTRACE_GETSIGINFO: 1157 /* XXX: This is a simplification. Different parts of the 1158 * siginfo_t are valid depending on the type of signal. 1159 */ 1160 POST_MEM_WRITE( ARG4, sizeof(vki_siginfo_t)); 1161 break; 1162 default: 1163 break; 1164 } 1165} 1166 1167static Addr deref_Addr ( ThreadId tid, Addr a, Char* s ) 1168{ 1169 Addr* a_p = (Addr*)a; 1170 PRE_MEM_READ( s, (Addr)a_p, sizeof(Addr) ); 1171 return *a_p; 1172} 1173 1174PRE(sys_ipc) 1175{ 1176 PRINT("sys_ipc ( %ld, %ld, %ld, %ld, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6); 1177 // XXX: this is simplistic -- some args are not used in all circumstances. 1178 PRE_REG_READ6(int, "ipc", 1179 vki_uint, call, int, first, int, second, int, third, 1180 void *, ptr, long, fifth) 1181 1182 switch (ARG1 /* call */) { 1183 case VKI_SEMOP: 1184 ML_(generic_PRE_sys_semop)( tid, ARG2, ARG5, ARG3 ); 1185 *flags |= SfMayBlock; 1186 break; 1187 case VKI_SEMGET: 1188 break; 1189 case VKI_SEMCTL: 1190 { 1191 UWord arg = deref_Addr( tid, ARG5, "semctl(arg)" ); 1192 ML_(generic_PRE_sys_semctl)( tid, ARG2, ARG3, ARG4, arg ); 1193 break; 1194 } 1195 case VKI_SEMTIMEDOP: 1196 ML_(generic_PRE_sys_semtimedop)( tid, ARG2, ARG5, ARG3, ARG6 ); 1197 *flags |= SfMayBlock; 1198 break; 1199 case VKI_MSGSND: 1200 ML_(linux_PRE_sys_msgsnd)( tid, ARG2, ARG5, ARG3, ARG4 ); 1201 if ((ARG4 & VKI_IPC_NOWAIT) == 0) 1202 *flags |= SfMayBlock; 1203 break; 1204 case VKI_MSGRCV: 1205 { 1206 Addr msgp; 1207 Word msgtyp; 1208 1209 msgp = deref_Addr( tid, 1210 (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgp), 1211 "msgrcv(msgp)" ); 1212 msgtyp = deref_Addr( tid, 1213 (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgtyp), 1214 "msgrcv(msgp)" ); 1215 1216 ML_(linux_PRE_sys_msgrcv)( tid, ARG2, msgp, ARG3, msgtyp, ARG4 ); 1217 1218 if ((ARG4 & 
VKI_IPC_NOWAIT) == 0) 1219 *flags |= SfMayBlock; 1220 break; 1221 } 1222 case VKI_MSGGET: 1223 break; 1224 case VKI_MSGCTL: 1225 ML_(linux_PRE_sys_msgctl)( tid, ARG2, ARG3, ARG5 ); 1226 break; 1227 case VKI_SHMAT: 1228 { 1229 UWord w; 1230 PRE_MEM_WRITE( "shmat(raddr)", ARG4, sizeof(Addr) ); 1231 w = ML_(generic_PRE_sys_shmat)( tid, ARG2, ARG5, ARG3 ); 1232 if (w == 0) 1233 SET_STATUS_Failure( VKI_EINVAL ); 1234 else 1235 ARG5 = w; 1236 break; 1237 } 1238 case VKI_SHMDT: 1239 if (!ML_(generic_PRE_sys_shmdt)(tid, ARG5)) 1240 SET_STATUS_Failure( VKI_EINVAL ); 1241 break; 1242 case VKI_SHMGET: 1243 break; 1244 case VKI_SHMCTL: /* IPCOP_shmctl */ 1245 ML_(generic_PRE_sys_shmctl)( tid, ARG2, ARG3, ARG5 ); 1246 break; 1247 default: 1248 VG_(message)(Vg_DebugMsg, "FATAL: unhandled syscall(ipc) %ld\n", ARG1 ); 1249 VG_(core_panic)("... bye!\n"); 1250 break; /*NOTREACHED*/ 1251 } 1252} 1253 1254POST(sys_ipc) 1255{ 1256 vg_assert(SUCCESS); 1257 switch (ARG1 /* call */) { 1258 case VKI_SEMOP: 1259 case VKI_SEMGET: 1260 break; 1261 case VKI_SEMCTL: 1262 { 1263 UWord arg = deref_Addr( tid, ARG5, "semctl(arg)" ); 1264 ML_(generic_PRE_sys_semctl)( tid, ARG2, ARG3, ARG4, arg ); 1265 break; 1266 } 1267 case VKI_SEMTIMEDOP: 1268 case VKI_MSGSND: 1269 break; 1270 case VKI_MSGRCV: 1271 { 1272 Addr msgp; 1273 Word msgtyp; 1274 1275 msgp = deref_Addr( tid, 1276 (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgp), 1277 "msgrcv(msgp)" ); 1278 msgtyp = deref_Addr( tid, 1279 (Addr) (&((struct vki_ipc_kludge *)ARG5)->msgtyp), 1280 "msgrcv(msgp)" ); 1281 1282 ML_(linux_POST_sys_msgrcv)( tid, RES, ARG2, msgp, ARG3, msgtyp, ARG4 ); 1283 break; 1284 } 1285 case VKI_MSGGET: 1286 break; 1287 case VKI_MSGCTL: 1288 ML_(linux_POST_sys_msgctl)( tid, RES, ARG2, ARG3, ARG5 ); 1289 break; 1290 case VKI_SHMAT: 1291 { 1292 Addr addr; 1293 1294 /* force readability. 
before the syscall it is 1295 * indeed uninitialized, as can be seen in 1296 * glibc/sysdeps/unix/sysv/linux/shmat.c */ 1297 POST_MEM_WRITE( ARG4, sizeof( Addr ) ); 1298 1299 addr = deref_Addr ( tid, ARG4, "shmat(addr)" ); 1300 ML_(generic_POST_sys_shmat)( tid, addr, ARG2, ARG5, ARG3 ); 1301 break; 1302 } 1303 case VKI_SHMDT: 1304 ML_(generic_POST_sys_shmdt)( tid, RES, ARG5 ); 1305 break; 1306 case VKI_SHMGET: 1307 break; 1308 case VKI_SHMCTL: 1309 ML_(generic_POST_sys_shmctl)( tid, RES, ARG2, ARG3, ARG5 ); 1310 break; 1311 default: 1312 VG_(message)(Vg_DebugMsg, 1313 "FATAL: unhandled syscall(ipc) %ld\n", 1314 ARG1 ); 1315 VG_(core_panic)("... bye!\n"); 1316 break; /*NOTREACHED*/ 1317 } 1318} 1319 1320PRE(old_mmap) 1321{ 1322 /* struct mmap_arg_struct { 1323 unsigned long addr; 1324 unsigned long len; 1325 unsigned long prot; 1326 unsigned long flags; 1327 unsigned long fd; 1328 unsigned long offset; 1329 }; */ 1330 UWord a1, a2, a3, a4, a5, a6; 1331 SysRes r; 1332 1333 UWord* args = (UWord*)ARG1; 1334 PRE_REG_READ1(long, "old_mmap", struct mmap_arg_struct *, args); 1335 PRE_MEM_READ( "old_mmap(args)", (Addr)args, 6*sizeof(UWord) ); 1336 1337 a1 = args[1-1]; 1338 a2 = args[2-1]; 1339 a3 = args[3-1]; 1340 a4 = args[4-1]; 1341 a5 = args[5-1]; 1342 a6 = args[6-1]; 1343 1344 PRINT("old_mmap ( %#lx, %llu, %ld, %ld, %ld, %ld )", 1345 a1, (ULong)a2, a3, a4, a5, a6 ); 1346 1347 r = ML_(generic_PRE_sys_mmap)( tid, a1, a2, a3, a4, a5, (Off64T)a6 ); 1348 SET_STATUS_from_SysRes(r); 1349} 1350 1351PRE(sys_mmap2) 1352{ 1353 SysRes r; 1354 1355 // Exactly like old_mmap() except: 1356 // - all 6 args are passed in regs, rather than in a memory-block. 1357 // - the file offset is specified in pagesize units rather than bytes, 1358 // so that it can be used for files bigger than 2^32 bytes. 1359 // pagesize or 4K-size units in offset? For ppc32/64-linux, this is 1360 // 4K-sized. Assert that the page size is 4K here for safety. 
   vg_assert(VKI_PAGE_SIZE == 4096);
   PRINT("sys_mmap2 ( %#lx, %llu, %ld, %ld, %ld, %ld )",
         ARG1, (ULong)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(long, "mmap2",
                 unsigned long, start, unsigned long, length,
                 unsigned long, prot,  unsigned long, flags,
                 unsigned long, fd,    unsigned long, offset);

   /* scale the 4K-unit offset up to a byte offset for the generic
      mmap handler */
   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5,
                                       4096 * (Off64T)ARG6 );
   SET_STATUS_from_SysRes(r);
}

// XXX: lstat64/fstat64/stat64 are generic, but not necessarily
// applicable to every architecture -- I think only to 32-bit archs.
// We're going to need something like linux/core_os32.h for such
// things, eventually, I think.  --njn
PRE(sys_lstat64)
{
   PRINT("sys_lstat64 ( %#lx(%s), %#lx )",ARG1,(char*)ARG1,ARG2);
   PRE_REG_READ2(long, "lstat64", char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "lstat64(file_name)", ARG1 );
   PRE_MEM_WRITE( "lstat64(buf)", ARG2, sizeof(struct vki_stat64) );
}

POST(sys_lstat64)
{
   vg_assert(SUCCESS);
   if (RES == 0) {
      POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
   }
}

PRE(sys_stat64)
{
   PRINT("sys_stat64 ( %#lx(%s), %#lx )",ARG1,(char*)ARG1,ARG2);
   PRE_REG_READ2(long, "stat64", char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "stat64(file_name)", ARG1 );
   PRE_MEM_WRITE( "stat64(buf)", ARG2, sizeof(struct vki_stat64) );
}

POST(sys_stat64)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}

PRE(sys_fstatat64)
{
   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx )",ARG1,ARG2,(char*)ARG2,ARG3);
   PRE_REG_READ3(long, "fstatat64",
                 int, dfd, char *, file_name, struct stat64 *, buf);
   PRE_MEM_RASCIIZ( "fstatat64(file_name)", ARG2 );
   PRE_MEM_WRITE( "fstatat64(buf)", ARG3, sizeof(struct vki_stat64) );
}

POST(sys_fstatat64)
{
   POST_MEM_WRITE( ARG3, sizeof(struct vki_stat64) );
}

PRE(sys_fstat64)
{
   PRINT("sys_fstat64 ( %ld, %#lx )",ARG1,ARG2);
   PRE_REG_READ2(long, "fstat64", unsigned long, fd, struct stat64 *, buf);
   PRE_MEM_WRITE( "fstat64(buf)", ARG2, sizeof(struct vki_stat64) );
}

POST(sys_fstat64)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}

/* socketcall(2): the single x86 entry point that demultiplexes all
   socket operations.  ARG2 points at an array of words holding the
   real arguments; the ARG2_n macros below pick them out. */
PRE(sys_socketcall)
{
#  define ARG2_0  (((UWord*)ARG2)[0])
#  define ARG2_1  (((UWord*)ARG2)[1])
#  define ARG2_2  (((UWord*)ARG2)[2])
#  define ARG2_3  (((UWord*)ARG2)[3])
#  define ARG2_4  (((UWord*)ARG2)[4])
#  define ARG2_5  (((UWord*)ARG2)[5])

   *flags |= SfMayBlock;
   PRINT("sys_socketcall ( %ld, %#lx )",ARG1,ARG2);
   PRE_REG_READ2(long, "socketcall", int, call, unsigned long *, args);

   switch (ARG1 /* request */) {

   case VKI_SYS_SOCKETPAIR:
      /* int socketpair(int d, int type, int protocol, int sv[2]); */
      PRE_MEM_READ( "socketcall.socketpair(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_socketpair)( tid, ARG2_0, ARG2_1, ARG2_2, ARG2_3 );
      break;

   case VKI_SYS_SOCKET:
      /* int socket(int domain, int type, int protocol); */
      PRE_MEM_READ( "socketcall.socket(args)", ARG2, 3*sizeof(Addr) );
      break;

   case VKI_SYS_BIND:
      /* int bind(int sockfd, struct sockaddr *my_addr,
                  int addrlen); */
      PRE_MEM_READ( "socketcall.bind(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_bind)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_LISTEN:
      /* int listen(int s, int backlog); */
      PRE_MEM_READ( "socketcall.listen(args)", ARG2, 2*sizeof(Addr) );
      break;

   case VKI_SYS_ACCEPT: {
      /* int accept(int s, struct sockaddr *addr, int *addrlen); */
      PRE_MEM_READ( "socketcall.accept(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_accept)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;
   }

   case VKI_SYS_SENDTO:
      /* int sendto(int s, const void *msg, int len,
                    unsigned int flags,
                    const struct sockaddr *to, int tolen); */
      PRE_MEM_READ( "socketcall.sendto(args)", ARG2, 6*sizeof(Addr) );
      ML_(generic_PRE_sys_sendto)( tid, ARG2_0, ARG2_1, ARG2_2,
                                   ARG2_3, ARG2_4, ARG2_5 );
      break;

   case VKI_SYS_SEND:
      /* int send(int s, const void *msg, size_t len, int flags); */
      PRE_MEM_READ( "socketcall.send(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_send)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_RECVFROM:
      /* int recvfrom(int s, void *buf, int len, unsigned int flags,
                      struct sockaddr *from, int *fromlen); */
      PRE_MEM_READ( "socketcall.recvfrom(args)", ARG2, 6*sizeof(Addr) );
      ML_(generic_PRE_sys_recvfrom)( tid, ARG2_0, ARG2_1, ARG2_2,
                                     ARG2_3, ARG2_4, ARG2_5 );
      break;

   case VKI_SYS_RECV:
      /* int recv(int s, void *buf, int len, unsigned int flags); */
      /* man 2 recv says:
         The recv call is normally used only on a connected socket
         (see connect(2)) and is identical to recvfrom with a NULL
         from parameter.
      */
      PRE_MEM_READ( "socketcall.recv(args)", ARG2, 4*sizeof(Addr) );
      ML_(generic_PRE_sys_recv)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_CONNECT:
      /* int connect(int sockfd,
                     struct sockaddr *serv_addr, int addrlen ); */
      PRE_MEM_READ( "socketcall.connect(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_connect)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_SETSOCKOPT:
      /* int setsockopt(int s, int level, int optname,
                        const void *optval, int optlen); */
      PRE_MEM_READ( "socketcall.setsockopt(args)", ARG2, 5*sizeof(Addr) );
      ML_(generic_PRE_sys_setsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
                                       ARG2_3, ARG2_4 );
      break;

   case VKI_SYS_GETSOCKOPT:
      /* int getsockopt(int s, int level, int optname,
                        void *optval, socklen_t *optlen); */
      PRE_MEM_READ( "socketcall.getsockopt(args)", ARG2, 5*sizeof(Addr) );
      ML_(linux_PRE_sys_getsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
                                     ARG2_3, ARG2_4 );
      break;

   case VKI_SYS_GETSOCKNAME:
      /* int getsockname(int s, struct sockaddr* name, int* namelen) */
      PRE_MEM_READ( "socketcall.getsockname(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_getsockname)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_GETPEERNAME:
      /* int getpeername(int s, struct sockaddr* name, int* namelen) */
      PRE_MEM_READ( "socketcall.getpeername(args)", ARG2, 3*sizeof(Addr) );
      ML_(generic_PRE_sys_getpeername)( tid, ARG2_0, ARG2_1, ARG2_2 );
      break;

   case VKI_SYS_SHUTDOWN:
      /* int shutdown(int s, int how); */
      PRE_MEM_READ( "socketcall.shutdown(args)", ARG2, 2*sizeof(Addr) );
      break;

   case VKI_SYS_SENDMSG: {
      /* int sendmsg(int s, const struct msghdr *msg, int flags); */

      /* this causes warnings, and I don't get why. glibc bug?
1556 * (after all it's glibc providing the arguments array) 1557 PRE_MEM_READ( "socketcall.sendmsg(args)", ARG2, 3*sizeof(Addr) ); 1558 */ 1559 ML_(generic_PRE_sys_sendmsg)( tid, ARG2_0, ARG2_1 ); 1560 break; 1561 } 1562 1563 case VKI_SYS_RECVMSG: { 1564 /* int recvmsg(int s, struct msghdr *msg, int flags); */ 1565 1566 /* this causes warnings, and I don't get why. glibc bug? 1567 * (after all it's glibc providing the arguments array) 1568 PRE_MEM_READ("socketcall.recvmsg(args)", ARG2, 3*sizeof(Addr) ); 1569 */ 1570 ML_(generic_PRE_sys_recvmsg)( tid, ARG2_0, ARG2_1 ); 1571 break; 1572 } 1573 1574 default: 1575 VG_(message)(Vg_DebugMsg,"Warning: unhandled socketcall 0x%lx\n",ARG1); 1576 SET_STATUS_Failure( VKI_EINVAL ); 1577 break; 1578 } 1579# undef ARG2_0 1580# undef ARG2_1 1581# undef ARG2_2 1582# undef ARG2_3 1583# undef ARG2_4 1584# undef ARG2_5 1585} 1586 1587POST(sys_socketcall) 1588{ 1589# define ARG2_0 (((UWord*)ARG2)[0]) 1590# define ARG2_1 (((UWord*)ARG2)[1]) 1591# define ARG2_2 (((UWord*)ARG2)[2]) 1592# define ARG2_3 (((UWord*)ARG2)[3]) 1593# define ARG2_4 (((UWord*)ARG2)[4]) 1594# define ARG2_5 (((UWord*)ARG2)[5]) 1595 1596 SysRes r; 1597 vg_assert(SUCCESS); 1598 switch (ARG1 /* request */) { 1599 1600 case VKI_SYS_SOCKETPAIR: 1601 r = ML_(generic_POST_sys_socketpair)( 1602 tid, VG_(mk_SysRes_Success)(RES), 1603 ARG2_0, ARG2_1, ARG2_2, ARG2_3 1604 ); 1605 SET_STATUS_from_SysRes(r); 1606 break; 1607 1608 case VKI_SYS_SOCKET: 1609 r = ML_(generic_POST_sys_socket)( tid, VG_(mk_SysRes_Success)(RES) ); 1610 SET_STATUS_from_SysRes(r); 1611 break; 1612 1613 case VKI_SYS_BIND: 1614 /* int bind(int sockfd, struct sockaddr *my_addr, 1615 int addrlen); */ 1616 break; 1617 1618 case VKI_SYS_LISTEN: 1619 /* int listen(int s, int backlog); */ 1620 break; 1621 1622 case VKI_SYS_ACCEPT: 1623 /* int accept(int s, struct sockaddr *addr, int *addrlen); */ 1624 r = ML_(generic_POST_sys_accept)( tid, VG_(mk_SysRes_Success)(RES), 1625 ARG2_0, ARG2_1, ARG2_2 ); 1626 
SET_STATUS_from_SysRes(r); 1627 break; 1628 1629 case VKI_SYS_SENDTO: 1630 break; 1631 1632 case VKI_SYS_SEND: 1633 break; 1634 1635 case VKI_SYS_RECVFROM: 1636 ML_(generic_POST_sys_recvfrom)( tid, VG_(mk_SysRes_Success)(RES), 1637 ARG2_0, ARG2_1, ARG2_2, 1638 ARG2_3, ARG2_4, ARG2_5 ); 1639 break; 1640 1641 case VKI_SYS_RECV: 1642 ML_(generic_POST_sys_recv)( tid, RES, ARG2_0, ARG2_1, ARG2_2 ); 1643 break; 1644 1645 case VKI_SYS_CONNECT: 1646 break; 1647 1648 case VKI_SYS_SETSOCKOPT: 1649 break; 1650 1651 case VKI_SYS_GETSOCKOPT: 1652 ML_(linux_POST_sys_getsockopt)( tid, VG_(mk_SysRes_Success)(RES), 1653 ARG2_0, ARG2_1, 1654 ARG2_2, ARG2_3, ARG2_4 ); 1655 break; 1656 1657 case VKI_SYS_GETSOCKNAME: 1658 ML_(generic_POST_sys_getsockname)( tid, VG_(mk_SysRes_Success)(RES), 1659 ARG2_0, ARG2_1, ARG2_2 ); 1660 break; 1661 1662 case VKI_SYS_GETPEERNAME: 1663 ML_(generic_POST_sys_getpeername)( tid, VG_(mk_SysRes_Success)(RES), 1664 ARG2_0, ARG2_1, ARG2_2 ); 1665 break; 1666 1667 case VKI_SYS_SHUTDOWN: 1668 break; 1669 1670 case VKI_SYS_SENDMSG: 1671 break; 1672 1673 case VKI_SYS_RECVMSG: 1674 ML_(generic_POST_sys_recvmsg)( tid, ARG2_0, ARG2_1 ); 1675 break; 1676 1677 default: 1678 VG_(message)(Vg_DebugMsg,"FATAL: unhandled socketcall 0x%lx\n",ARG1); 1679 VG_(core_panic)("... 
bye!\n"); 1680 break; /*NOTREACHED*/ 1681 } 1682# undef ARG2_0 1683# undef ARG2_1 1684# undef ARG2_2 1685# undef ARG2_3 1686# undef ARG2_4 1687# undef ARG2_5 1688} 1689 1690/* Convert from non-RT to RT sigset_t's */ 1691static 1692void convert_sigset_to_rt(const vki_old_sigset_t *oldset, vki_sigset_t *set) 1693{ 1694 VG_(sigemptyset)(set); 1695 set->sig[0] = *oldset; 1696} 1697PRE(sys_sigaction) 1698{ 1699 vki_sigaction_toK_t new, *newp; 1700 vki_sigaction_fromK_t old, *oldp; 1701 1702 PRINT("sys_sigaction ( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3); 1703 PRE_REG_READ3(int, "sigaction", 1704 int, signum, const struct old_sigaction *, act, 1705 struct old_sigaction *, oldact); 1706 1707 newp = oldp = NULL; 1708 1709 if (ARG2 != 0) { 1710 struct vki_old_sigaction *sa = (struct vki_old_sigaction *)ARG2; 1711 PRE_MEM_READ( "sigaction(act->sa_handler)", (Addr)&sa->ksa_handler, sizeof(sa->ksa_handler)); 1712 PRE_MEM_READ( "sigaction(act->sa_mask)", (Addr)&sa->sa_mask, sizeof(sa->sa_mask)); 1713 PRE_MEM_READ( "sigaction(act->sa_flags)", (Addr)&sa->sa_flags, sizeof(sa->sa_flags)); 1714 if (ML_(safe_to_deref)(sa,sizeof(sa)) 1715 && (sa->sa_flags & VKI_SA_RESTORER)) 1716 PRE_MEM_READ( "sigaction(act->sa_restorer)", (Addr)&sa->sa_restorer, sizeof(sa->sa_restorer)); 1717 } 1718 1719 if (ARG3 != 0) { 1720 PRE_MEM_WRITE( "sigaction(oldact)", ARG3, sizeof(struct vki_old_sigaction)); 1721 oldp = &old; 1722 } 1723 1724 //jrs 20050207: what?! how can this make any sense? 
1725 //if (VG_(is_kerror)(SYSRES)) 1726 // return; 1727 1728 if (ARG2 != 0) { 1729 struct vki_old_sigaction *oldnew = (struct vki_old_sigaction *)ARG2; 1730 new.ksa_handler = oldnew->ksa_handler; 1731 new.sa_flags = oldnew->sa_flags; 1732 new.sa_restorer = oldnew->sa_restorer; 1733 convert_sigset_to_rt(&oldnew->sa_mask, &new.sa_mask); 1734 newp = &new; 1735 } 1736 1737 SET_STATUS_from_SysRes( VG_(do_sys_sigaction)(ARG1, newp, oldp) ); 1738 1739 if (ARG3 != 0 && SUCCESS && RES == 0) { 1740 struct vki_old_sigaction *oldold = (struct vki_old_sigaction *)ARG3; 1741 oldold->ksa_handler = oldp->ksa_handler; 1742 oldold->sa_flags = oldp->sa_flags; 1743 oldold->sa_restorer = oldp->sa_restorer; 1744 oldold->sa_mask = oldp->sa_mask.sig[0]; 1745 } 1746} 1747 1748POST(sys_sigaction) 1749{ 1750 vg_assert(SUCCESS); 1751 if (RES == 0 && ARG3 != 0) 1752 POST_MEM_WRITE( ARG3, sizeof(struct vki_old_sigaction)); 1753} 1754 1755PRE(sys_sigsuspend) 1756{ 1757 /* The C library interface to sigsuspend just takes a pointer to 1758 a signal mask but this system call has three arguments - the first 1759 two don't appear to be used by the kernel and are always passed as 1760 zero by glibc and the third is the first word of the signal mask 1761 so only 32 signals are supported. 1762 1763 In fact glibc normally uses rt_sigsuspend if it is available as 1764 that takes a pointer to the signal mask so supports more signals. 
    */
   *flags |= SfMayBlock;
   PRINT("sys_sigsuspend ( %ld, %ld, %ld )", ARG1,ARG2,ARG3 );
   PRE_REG_READ3(int, "sigsuspend",
                 int, history0, int, history1,
                 vki_old_sigset_t, mask);
}

/* vm86old(2): x86-only virtual-8086 mode entry (old interface).  The
   vm86_struct is updated by the kernel, so mark it writable here and
   written in the POST. */
PRE(sys_vm86old)
{
   PRINT("sys_vm86old ( %#lx )", ARG1);
   PRE_REG_READ1(int, "vm86old", struct vm86_struct *, info);
   PRE_MEM_WRITE( "vm86old(info)", ARG1, sizeof(struct vki_vm86_struct));
}

POST(sys_vm86old)
{
   POST_MEM_WRITE( ARG1, sizeof(struct vki_vm86_struct));
}

/* vm86(2): newer interface; only the ENTER functions pass a
   vm86plus_struct that the kernel updates. */
PRE(sys_vm86)
{
   PRINT("sys_vm86 ( %ld, %#lx )", ARG1,ARG2);
   PRE_REG_READ2(int, "vm86", unsigned long, fn, struct vm86plus_struct *, v86);
   if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
      PRE_MEM_WRITE( "vm86(v86)", ARG2, sizeof(struct vki_vm86plus_struct));
}

POST(sys_vm86)
{
   if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
      POST_MEM_WRITE( ARG2, sizeof(struct vki_vm86plus_struct));
}


/* ---------------------------------------------------------------
   PRE/POST wrappers for x86/Linux-variant specific syscalls
   ------------------------------------------------------------ */

PRE(sys_syscall223)
{
   Int err;

   /* 223 is used by sys_bproc.  If we're not on a declared bproc
      variant, fail in the usual way. */

   if (!VG_(strstr)(VG_(clo_kernel_variant), "bproc")) {
      PRINT("non-existent syscall! (syscall 223)");
      PRE_REG_READ0(long, "ni_syscall(223)");
      SET_STATUS_Failure( VKI_ENOSYS );
      return;
   }

   err = ML_(linux_variant_PRE_sys_bproc)( ARG1, ARG2, ARG3,
                                           ARG4, ARG5, ARG6 );
   if (err) {
      SET_STATUS_Failure( err );
      return;
   }
   /* Let it go through. */
   *flags |= SfMayBlock; /* who knows?  play safe. */
}

POST(sys_syscall223)
{
   ML_(linux_variant_POST_sys_bproc)( ARG1, ARG2, ARG3,
                                      ARG4, ARG5, ARG6 );
}

#undef PRE
#undef POST


/* ---------------------------------------------------------------------
   The x86/Linux syscall table
   ------------------------------------------------------------------ */

/* Add an x86-linux specific wrapper to a syscall table. */
#define PLAX_(sysno, name)    WRAPPER_ENTRY_X_(x86_linux, sysno, name)
#define PLAXY(sysno, name)    WRAPPER_ENTRY_XY(x86_linux, sysno, name)


// This table maps from __NR_xxx syscall numbers (from
// linux/include/asm-i386/unistd.h) to the appropriate PRE/POST sys_foo()
// wrappers on x86 (as per sys_call_table in linux/arch/i386/kernel/entry.S).
//
// For those syscalls not handled by Valgrind, the annotation indicates its
// arch/OS combination, eg. */* (generic), */Linux (Linux only), ?/?
// (unknown).

const SyscallTableEntry ML_(syscall_table)[] = {
//zz    //   (restart_syscall)                             // 0
   GENX_(__NR_exit,              sys_exit),           // 1
   GENX_(__NR_fork,              sys_fork),           // 2
   GENXY(__NR_read,              sys_read),           // 3
   GENX_(__NR_write,             sys_write),          // 4

   GENXY(__NR_open,              sys_open),           // 5
   GENXY(__NR_close,             sys_close),          // 6
   GENXY(__NR_waitpid,           sys_waitpid),        // 7
   GENXY(__NR_creat,             sys_creat),          // 8
   GENX_(__NR_link,              sys_link),           // 9

   GENX_(__NR_unlink,            sys_unlink),         // 10
   GENX_(__NR_execve,            sys_execve),         // 11
   GENX_(__NR_chdir,             sys_chdir),          // 12
   GENXY(__NR_time,              sys_time),           // 13
   GENX_(__NR_mknod,             sys_mknod),          // 14

   GENX_(__NR_chmod,             sys_chmod),          // 15
//zz    LINX_(__NR_lchown,            sys_lchown16),       // 16
   GENX_(__NR_break,             sys_ni_syscall),     // 17
//zz    //   (__NR_oldstat,           sys_stat),           // 18 (obsolete)
   LINX_(__NR_lseek,             sys_lseek),          // 19

   GENX_(__NR_getpid,            sys_getpid),         // 20
   LINX_(__NR_mount,             sys_mount),          // 21
   LINX_(__NR_umount,
sys_oldumount), // 22 1883 LINX_(__NR_setuid, sys_setuid16), // 23 ## P 1884 LINX_(__NR_getuid, sys_getuid16), // 24 ## P 1885 1886 LINX_(__NR_stime, sys_stime), // 25 * (SVr4,SVID,X/OPEN) 1887 PLAXY(__NR_ptrace, sys_ptrace), // 26 1888 GENX_(__NR_alarm, sys_alarm), // 27 1889//zz // (__NR_oldfstat, sys_fstat), // 28 * L -- obsolete 1890 GENX_(__NR_pause, sys_pause), // 29 1891 1892 LINX_(__NR_utime, sys_utime), // 30 1893 GENX_(__NR_stty, sys_ni_syscall), // 31 1894 GENX_(__NR_gtty, sys_ni_syscall), // 32 1895 GENX_(__NR_access, sys_access), // 33 1896 GENX_(__NR_nice, sys_nice), // 34 1897 1898 GENX_(__NR_ftime, sys_ni_syscall), // 35 1899 GENX_(__NR_sync, sys_sync), // 36 1900 GENX_(__NR_kill, sys_kill), // 37 1901 GENX_(__NR_rename, sys_rename), // 38 1902 GENX_(__NR_mkdir, sys_mkdir), // 39 1903 1904 GENX_(__NR_rmdir, sys_rmdir), // 40 1905 GENXY(__NR_dup, sys_dup), // 41 1906 LINXY(__NR_pipe, sys_pipe), // 42 1907 GENXY(__NR_times, sys_times), // 43 1908 GENX_(__NR_prof, sys_ni_syscall), // 44 1909//zz 1910 GENX_(__NR_brk, sys_brk), // 45 1911 LINX_(__NR_setgid, sys_setgid16), // 46 1912 LINX_(__NR_getgid, sys_getgid16), // 47 1913//zz // (__NR_signal, sys_signal), // 48 */* (ANSI C) 1914 LINX_(__NR_geteuid, sys_geteuid16), // 49 1915 1916 LINX_(__NR_getegid, sys_getegid16), // 50 1917 GENX_(__NR_acct, sys_acct), // 51 1918 LINX_(__NR_umount2, sys_umount), // 52 1919 GENX_(__NR_lock, sys_ni_syscall), // 53 1920 LINXY(__NR_ioctl, sys_ioctl), // 54 1921 1922 LINXY(__NR_fcntl, sys_fcntl), // 55 1923 GENX_(__NR_mpx, sys_ni_syscall), // 56 1924 GENX_(__NR_setpgid, sys_setpgid), // 57 1925 GENX_(__NR_ulimit, sys_ni_syscall), // 58 1926//zz // (__NR_oldolduname, sys_olduname), // 59 Linux -- obsolete 1927//zz 1928 GENX_(__NR_umask, sys_umask), // 60 1929 GENX_(__NR_chroot, sys_chroot), // 61 1930//zz // (__NR_ustat, sys_ustat) // 62 SVr4 -- deprecated 1931 GENXY(__NR_dup2, sys_dup2), // 63 1932 GENX_(__NR_getppid, sys_getppid), // 64 1933 1934 GENX_(__NR_getpgrp, 
sys_getpgrp), // 65 1935 GENX_(__NR_setsid, sys_setsid), // 66 1936 PLAXY(__NR_sigaction, sys_sigaction), // 67 1937//zz // (__NR_sgetmask, sys_sgetmask), // 68 */* (ANSI C) 1938//zz // (__NR_ssetmask, sys_ssetmask), // 69 */* (ANSI C) 1939//zz 1940 LINX_(__NR_setreuid, sys_setreuid16), // 70 1941 LINX_(__NR_setregid, sys_setregid16), // 71 1942 PLAX_(__NR_sigsuspend, sys_sigsuspend), // 72 1943 LINXY(__NR_sigpending, sys_sigpending), // 73 1944//zz // (__NR_sethostname, sys_sethostname), // 74 */* 1945//zz 1946 GENX_(__NR_setrlimit, sys_setrlimit), // 75 1947 GENXY(__NR_getrlimit, sys_old_getrlimit), // 76 1948 GENXY(__NR_getrusage, sys_getrusage), // 77 1949 GENXY(__NR_gettimeofday, sys_gettimeofday), // 78 1950 GENX_(__NR_settimeofday, sys_settimeofday), // 79 1951 1952 LINXY(__NR_getgroups, sys_getgroups16), // 80 1953 LINX_(__NR_setgroups, sys_setgroups16), // 81 1954 PLAX_(__NR_select, old_select), // 82 1955 GENX_(__NR_symlink, sys_symlink), // 83 1956//zz // (__NR_oldlstat, sys_lstat), // 84 -- obsolete 1957//zz 1958 GENX_(__NR_readlink, sys_readlink), // 85 1959//zz // (__NR_uselib, sys_uselib), // 86 */Linux 1960//zz // (__NR_swapon, sys_swapon), // 87 */Linux 1961//zz // (__NR_reboot, sys_reboot), // 88 */Linux 1962//zz // (__NR_readdir, old_readdir), // 89 -- superseded 1963//zz 1964 PLAX_(__NR_mmap, old_mmap), // 90 1965 GENXY(__NR_munmap, sys_munmap), // 91 1966 GENX_(__NR_truncate, sys_truncate), // 92 1967 GENX_(__NR_ftruncate, sys_ftruncate), // 93 1968 GENX_(__NR_fchmod, sys_fchmod), // 94 1969 1970 LINX_(__NR_fchown, sys_fchown16), // 95 1971 GENX_(__NR_getpriority, sys_getpriority), // 96 1972 GENX_(__NR_setpriority, sys_setpriority), // 97 1973 GENX_(__NR_profil, sys_ni_syscall), // 98 1974 GENXY(__NR_statfs, sys_statfs), // 99 1975 1976 GENXY(__NR_fstatfs, sys_fstatfs), // 100 1977 LINX_(__NR_ioperm, sys_ioperm), // 101 1978 PLAXY(__NR_socketcall, sys_socketcall), // 102 x86/Linux-only 1979 LINXY(__NR_syslog, sys_syslog), // 103 1980 
GENXY(__NR_setitimer, sys_setitimer), // 104 1981 1982 GENXY(__NR_getitimer, sys_getitimer), // 105 1983 GENXY(__NR_stat, sys_newstat), // 106 1984 GENXY(__NR_lstat, sys_newlstat), // 107 1985 GENXY(__NR_fstat, sys_newfstat), // 108 1986//zz // (__NR_olduname, sys_uname), // 109 -- obsolete 1987//zz 1988 GENX_(__NR_iopl, sys_iopl), // 110 1989 LINX_(__NR_vhangup, sys_vhangup), // 111 1990 GENX_(__NR_idle, sys_ni_syscall), // 112 1991 PLAXY(__NR_vm86old, sys_vm86old), // 113 x86/Linux-only 1992 GENXY(__NR_wait4, sys_wait4), // 114 1993//zz 1994//zz // (__NR_swapoff, sys_swapoff), // 115 */Linux 1995 LINXY(__NR_sysinfo, sys_sysinfo), // 116 1996 PLAXY(__NR_ipc, sys_ipc), // 117 1997 GENX_(__NR_fsync, sys_fsync), // 118 1998 PLAX_(__NR_sigreturn, sys_sigreturn), // 119 ?/Linux 1999 2000 PLAX_(__NR_clone, sys_clone), // 120 2001//zz // (__NR_setdomainname, sys_setdomainname), // 121 */*(?) 2002 GENXY(__NR_uname, sys_newuname), // 122 2003 PLAX_(__NR_modify_ldt, sys_modify_ldt), // 123 2004//zz LINXY(__NR_adjtimex, sys_adjtimex), // 124 2005//zz 2006 GENXY(__NR_mprotect, sys_mprotect), // 125 2007 LINXY(__NR_sigprocmask, sys_sigprocmask), // 126 2008//zz // Nb: create_module() was removed 2.4-->2.6 2009 GENX_(__NR_create_module, sys_ni_syscall), // 127 2010 LINX_(__NR_init_module, sys_init_module), // 128 2011 LINX_(__NR_delete_module, sys_delete_module), // 129 2012//zz 2013//zz // Nb: get_kernel_syms() was removed 2.4-->2.6 2014 GENX_(__NR_get_kernel_syms, sys_ni_syscall), // 130 2015 LINX_(__NR_quotactl, sys_quotactl), // 131 2016 GENX_(__NR_getpgid, sys_getpgid), // 132 2017 GENX_(__NR_fchdir, sys_fchdir), // 133 2018//zz // (__NR_bdflush, sys_bdflush), // 134 */Linux 2019//zz 2020//zz // (__NR_sysfs, sys_sysfs), // 135 SVr4 2021 LINX_(__NR_personality, sys_personality), // 136 2022 GENX_(__NR_afs_syscall, sys_ni_syscall), // 137 2023 LINX_(__NR_setfsuid, sys_setfsuid16), // 138 2024 LINX_(__NR_setfsgid, sys_setfsgid16), // 139 2025 2026 LINXY(__NR__llseek, 
sys_llseek), // 140 2027 GENXY(__NR_getdents, sys_getdents), // 141 2028 GENX_(__NR__newselect, sys_select), // 142 2029 GENX_(__NR_flock, sys_flock), // 143 2030 GENX_(__NR_msync, sys_msync), // 144 2031 2032 GENXY(__NR_readv, sys_readv), // 145 2033 GENX_(__NR_writev, sys_writev), // 146 2034 GENX_(__NR_getsid, sys_getsid), // 147 2035 GENX_(__NR_fdatasync, sys_fdatasync), // 148 2036 LINXY(__NR__sysctl, sys_sysctl), // 149 2037 2038 GENX_(__NR_mlock, sys_mlock), // 150 2039 GENX_(__NR_munlock, sys_munlock), // 151 2040 GENX_(__NR_mlockall, sys_mlockall), // 152 2041 LINX_(__NR_munlockall, sys_munlockall), // 153 2042 LINXY(__NR_sched_setparam, sys_sched_setparam), // 154 2043 2044 LINXY(__NR_sched_getparam, sys_sched_getparam), // 155 2045 LINX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 156 2046 LINX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 157 2047 LINX_(__NR_sched_yield, sys_sched_yield), // 158 2048 LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max),// 159 2049 2050 LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min),// 160 2051//zz //LINX?(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161 */* 2052 GENXY(__NR_nanosleep, sys_nanosleep), // 162 2053 GENX_(__NR_mremap, sys_mremap), // 163 2054 LINX_(__NR_setresuid, sys_setresuid16), // 164 2055 2056 LINXY(__NR_getresuid, sys_getresuid16), // 165 2057 PLAXY(__NR_vm86, sys_vm86), // 166 x86/Linux-only 2058 GENX_(__NR_query_module, sys_ni_syscall), // 167 2059 GENXY(__NR_poll, sys_poll), // 168 2060//zz // (__NR_nfsservctl, sys_nfsservctl), // 169 */Linux 2061//zz 2062 LINX_(__NR_setresgid, sys_setresgid16), // 170 2063 LINXY(__NR_getresgid, sys_getresgid16), // 171 2064 LINXY(__NR_prctl, sys_prctl), // 172 2065 PLAX_(__NR_rt_sigreturn, sys_rt_sigreturn), // 173 x86/Linux only? 
2066 LINXY(__NR_rt_sigaction, sys_rt_sigaction), // 174 2067 2068 LINXY(__NR_rt_sigprocmask, sys_rt_sigprocmask), // 175 2069 LINXY(__NR_rt_sigpending, sys_rt_sigpending), // 176 2070 LINXY(__NR_rt_sigtimedwait, sys_rt_sigtimedwait),// 177 2071 LINXY(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo),// 178 2072 LINX_(__NR_rt_sigsuspend, sys_rt_sigsuspend), // 179 2073 2074 GENXY(__NR_pread64, sys_pread64), // 180 2075 GENX_(__NR_pwrite64, sys_pwrite64), // 181 2076 LINX_(__NR_chown, sys_chown16), // 182 2077 GENXY(__NR_getcwd, sys_getcwd), // 183 2078 LINXY(__NR_capget, sys_capget), // 184 2079 2080 LINX_(__NR_capset, sys_capset), // 185 2081 GENXY(__NR_sigaltstack, sys_sigaltstack), // 186 2082 LINXY(__NR_sendfile, sys_sendfile), // 187 2083 GENXY(__NR_getpmsg, sys_getpmsg), // 188 2084 GENX_(__NR_putpmsg, sys_putpmsg), // 189 2085 2086 // Nb: we treat vfork as fork 2087 GENX_(__NR_vfork, sys_fork), // 190 2088 GENXY(__NR_ugetrlimit, sys_getrlimit), // 191 2089 PLAX_(__NR_mmap2, sys_mmap2), // 192 2090 GENX_(__NR_truncate64, sys_truncate64), // 193 2091 GENX_(__NR_ftruncate64, sys_ftruncate64), // 194 2092 2093 PLAXY(__NR_stat64, sys_stat64), // 195 2094 PLAXY(__NR_lstat64, sys_lstat64), // 196 2095 PLAXY(__NR_fstat64, sys_fstat64), // 197 2096 GENX_(__NR_lchown32, sys_lchown), // 198 2097 GENX_(__NR_getuid32, sys_getuid), // 199 2098 2099 GENX_(__NR_getgid32, sys_getgid), // 200 2100 GENX_(__NR_geteuid32, sys_geteuid), // 201 2101 GENX_(__NR_getegid32, sys_getegid), // 202 2102 GENX_(__NR_setreuid32, sys_setreuid), // 203 2103 GENX_(__NR_setregid32, sys_setregid), // 204 2104 2105 GENXY(__NR_getgroups32, sys_getgroups), // 205 2106 GENX_(__NR_setgroups32, sys_setgroups), // 206 2107 GENX_(__NR_fchown32, sys_fchown), // 207 2108 LINX_(__NR_setresuid32, sys_setresuid), // 208 2109 LINXY(__NR_getresuid32, sys_getresuid), // 209 2110 2111 LINX_(__NR_setresgid32, sys_setresgid), // 210 2112 LINXY(__NR_getresgid32, sys_getresgid), // 211 2113 GENX_(__NR_chown32, sys_chown), 
// 212 2114 GENX_(__NR_setuid32, sys_setuid), // 213 2115 GENX_(__NR_setgid32, sys_setgid), // 214 2116 2117 LINX_(__NR_setfsuid32, sys_setfsuid), // 215 2118 LINX_(__NR_setfsgid32, sys_setfsgid), // 216 2119//zz // (__NR_pivot_root, sys_pivot_root), // 217 */Linux 2120 GENXY(__NR_mincore, sys_mincore), // 218 2121 GENX_(__NR_madvise, sys_madvise), // 219 2122 2123 GENXY(__NR_getdents64, sys_getdents64), // 220 2124 LINXY(__NR_fcntl64, sys_fcntl64), // 221 2125 GENX_(222, sys_ni_syscall), // 222 2126 PLAXY(223, sys_syscall223), // 223 // sys_bproc? 2127 LINX_(__NR_gettid, sys_gettid), // 224 2128 2129//zz // (__NR_readahead, sys_readahead), // 225 */(Linux?) 2130 LINX_(__NR_setxattr, sys_setxattr), // 226 2131 LINX_(__NR_lsetxattr, sys_lsetxattr), // 227 2132 LINX_(__NR_fsetxattr, sys_fsetxattr), // 228 2133 LINXY(__NR_getxattr, sys_getxattr), // 229 2134 2135 LINXY(__NR_lgetxattr, sys_lgetxattr), // 230 2136 LINXY(__NR_fgetxattr, sys_fgetxattr), // 231 2137 LINXY(__NR_listxattr, sys_listxattr), // 232 2138 LINXY(__NR_llistxattr, sys_llistxattr), // 233 2139 LINXY(__NR_flistxattr, sys_flistxattr), // 234 2140 2141 LINX_(__NR_removexattr, sys_removexattr), // 235 2142 LINX_(__NR_lremovexattr, sys_lremovexattr), // 236 2143 LINX_(__NR_fremovexattr, sys_fremovexattr), // 237 2144 LINXY(__NR_tkill, sys_tkill), // 238 */Linux 2145 LINXY(__NR_sendfile64, sys_sendfile64), // 239 2146 2147 LINXY(__NR_futex, sys_futex), // 240 2148 LINX_(__NR_sched_setaffinity, sys_sched_setaffinity), // 241 2149 LINXY(__NR_sched_getaffinity, sys_sched_getaffinity), // 242 2150 PLAX_(__NR_set_thread_area, sys_set_thread_area), // 243 2151 PLAX_(__NR_get_thread_area, sys_get_thread_area), // 244 2152 2153 LINXY(__NR_io_setup, sys_io_setup), // 245 2154 LINX_(__NR_io_destroy, sys_io_destroy), // 246 2155 LINXY(__NR_io_getevents, sys_io_getevents), // 247 2156 LINX_(__NR_io_submit, sys_io_submit), // 248 2157 LINXY(__NR_io_cancel, sys_io_cancel), // 249 2158 2159 LINX_(__NR_fadvise64, 
sys_fadvise64), // 250 */(Linux?) 2160 GENX_(251, sys_ni_syscall), // 251 2161 LINX_(__NR_exit_group, sys_exit_group), // 252 2162 LINXY(__NR_lookup_dcookie, sys_lookup_dcookie), // 253 2163 LINXY(__NR_epoll_create, sys_epoll_create), // 254 2164 2165 LINX_(__NR_epoll_ctl, sys_epoll_ctl), // 255 2166 LINXY(__NR_epoll_wait, sys_epoll_wait), // 256 2167//zz // (__NR_remap_file_pages, sys_remap_file_pages), // 257 */Linux 2168 LINX_(__NR_set_tid_address, sys_set_tid_address), // 258 2169 LINXY(__NR_timer_create, sys_timer_create), // 259 2170 2171 LINXY(__NR_timer_settime, sys_timer_settime), // (timer_create+1) 2172 LINXY(__NR_timer_gettime, sys_timer_gettime), // (timer_create+2) 2173 LINX_(__NR_timer_getoverrun, sys_timer_getoverrun),//(timer_create+3) 2174 LINX_(__NR_timer_delete, sys_timer_delete), // (timer_create+4) 2175 LINX_(__NR_clock_settime, sys_clock_settime), // (timer_create+5) 2176 2177 LINXY(__NR_clock_gettime, sys_clock_gettime), // (timer_create+6) 2178 LINXY(__NR_clock_getres, sys_clock_getres), // (timer_create+7) 2179 LINXY(__NR_clock_nanosleep, sys_clock_nanosleep),// (timer_create+8) */* 2180 GENXY(__NR_statfs64, sys_statfs64), // 268 2181 GENXY(__NR_fstatfs64, sys_fstatfs64), // 269 2182 2183 LINX_(__NR_tgkill, sys_tgkill), // 270 */Linux 2184 GENX_(__NR_utimes, sys_utimes), // 271 2185 LINX_(__NR_fadvise64_64, sys_fadvise64_64), // 272 */(Linux?) 2186 GENX_(__NR_vserver, sys_ni_syscall), // 273 2187 LINX_(__NR_mbind, sys_mbind), // 274 ?/? 2188 2189 LINXY(__NR_get_mempolicy, sys_get_mempolicy), // 275 ?/? 2190 LINX_(__NR_set_mempolicy, sys_set_mempolicy), // 276 ?/? 
2191 LINXY(__NR_mq_open, sys_mq_open), // 277 2192 LINX_(__NR_mq_unlink, sys_mq_unlink), // (mq_open+1) 2193 LINX_(__NR_mq_timedsend, sys_mq_timedsend), // (mq_open+2) 2194 2195 LINXY(__NR_mq_timedreceive, sys_mq_timedreceive),// (mq_open+3) 2196 LINX_(__NR_mq_notify, sys_mq_notify), // (mq_open+4) 2197 LINXY(__NR_mq_getsetattr, sys_mq_getsetattr), // (mq_open+5) 2198 GENX_(__NR_sys_kexec_load, sys_ni_syscall), // 283 2199 LINXY(__NR_waitid, sys_waitid), // 284 2200 2201 GENX_(285, sys_ni_syscall), // 285 2202 LINX_(__NR_add_key, sys_add_key), // 286 2203 LINX_(__NR_request_key, sys_request_key), // 287 2204 LINXY(__NR_keyctl, sys_keyctl), // 288 2205 LINX_(__NR_ioprio_set, sys_ioprio_set), // 289 2206 2207 LINX_(__NR_ioprio_get, sys_ioprio_get), // 290 2208 LINX_(__NR_inotify_init, sys_inotify_init), // 291 2209 LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 292 2210 LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 293 2211// LINX_(__NR_migrate_pages, sys_migrate_pages), // 294 2212 2213 LINXY(__NR_openat, sys_openat), // 295 2214 LINX_(__NR_mkdirat, sys_mkdirat), // 296 2215 LINX_(__NR_mknodat, sys_mknodat), // 297 2216 LINX_(__NR_fchownat, sys_fchownat), // 298 2217 LINX_(__NR_futimesat, sys_futimesat), // 299 2218 2219 PLAXY(__NR_fstatat64, sys_fstatat64), // 300 2220 LINX_(__NR_unlinkat, sys_unlinkat), // 301 2221 LINX_(__NR_renameat, sys_renameat), // 302 2222 LINX_(__NR_linkat, sys_linkat), // 303 2223 LINX_(__NR_symlinkat, sys_symlinkat), // 304 2224 2225 LINX_(__NR_readlinkat, sys_readlinkat), // 305 2226 LINX_(__NR_fchmodat, sys_fchmodat), // 306 2227 LINX_(__NR_faccessat, sys_faccessat), // 307 2228 LINX_(__NR_pselect6, sys_pselect6), // 308 2229 LINXY(__NR_ppoll, sys_ppoll), // 309 2230 2231// LINX_(__NR_unshare, sys_unshare), // 310 2232 LINX_(__NR_set_robust_list, sys_set_robust_list), // 311 2233 LINXY(__NR_get_robust_list, sys_get_robust_list), // 312 2234// LINX_(__NR_splice, sys_ni_syscall), // 313 2235 LINX_(__NR_sync_file_range, 
sys_sync_file_range), // 314 2236 2237// LINX_(__NR_tee, sys_ni_syscall), // 315 2238// LINX_(__NR_vmsplice, sys_ni_syscall), // 316 2239// LINX_(__NR_move_pages, sys_ni_syscall), // 317 2240// LINX_(__NR_getcpu, sys_ni_syscall), // 318 2241 LINXY(__NR_epoll_pwait, sys_epoll_pwait), // 319 2242 2243 LINX_(__NR_utimensat, sys_utimensat), // 320 2244 LINXY(__NR_signalfd, sys_signalfd), // 321 2245 LINXY(__NR_timerfd_create, sys_timerfd_create), // 322 2246 LINX_(__NR_eventfd, sys_eventfd), // 323 2247 //LINX_(__NR_fallocate, sys_fallocate), // 324 2248 2249 LINXY(__NR_timerfd_settime, sys_timerfd_settime), // 325 2250 LINXY(__NR_timerfd_gettime, sys_timerfd_gettime), // 326 2251 LINXY(__NR_signalfd4, sys_signalfd4), // 327 2252 LINX_(__NR_eventfd2, sys_eventfd2), // 328 2253 LINXY(__NR_epoll_create1, sys_epoll_create1), // 329 2254 2255 // (__NR_dup3, sys_ni_syscall) // 330 2256 LINXY(__NR_pipe2, sys_pipe2), // 331 2257 LINXY(__NR_inotify_init1, sys_inotify_init1), // 332 2258 LINXY(__NR_preadv, sys_preadv), // 333 2259 LINX_(__NR_pwritev, sys_pwritev), // 334 2260 2261 // (__NR_rt_tgsigqueueinfo, sys_ni_syscall) // 335 2262 LINXY(__NR_perf_counter_open, sys_perf_counter_open) // 336 2263}; 2264 2265const UInt ML_(syscall_table_size) = 2266 sizeof(ML_(syscall_table)) / sizeof(ML_(syscall_table)[0]); 2267 2268#endif // defined(VGP_x86_linux) 2269 2270/*--------------------------------------------------------------------*/ 2271/*--- end ---*/ 2272/*--------------------------------------------------------------------*/ 2273