//===-- sanitizer_win.cc --------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <stdlib.h>
#include <io.h>
#include <windows.h>

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h
uptr GetPageSize() {
  return 1U << 14;  // FIXME: is this configurable?
}

uptr GetMmapGranularity() {
  return 1U << 16;  // FIXME: is this configurable?
}

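// The highest usable application address is reported by the OS, so query it
// via GetSystemInfo() instead of hard-coding a value.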
uptr GetMaxVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

bool FileExists(const char *filename) {
  UNIMPLEMENTED();
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
uptr GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

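// VirtualQuery(), given an address that lives on the current stack, describes
// the memory region containing that address: AllocationBase is the bottom of
// the stack reservation, and BaseAddress + RegionSize is taken as the top.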
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}

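// Memory is reserved and committed in a single VirtualAlloc() call; there is
// no lazy reservation here, and any failure is fatal.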
void *MmapOrDie(uptr size, const char *mem_type) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0) {
    Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s\n",
           size, size, mem_type);
    CHECK("unable to mmap" && 0);
  }
  return rv;
}

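// Note that MEM_DECOMMIT releases the physical pages but keeps the address
// range reserved; VirtualFree(addr, 0, MEM_RELEASE) would be needed to return
// the address space itself.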
void UnmapOrDie(void *addr, uptr size) {
  if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
    Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           size, size, addr);
    CHECK("unable to unmap" && 0);
  }
}

void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
      MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (p == 0)
    Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at %p (%d)\n",
           size, size, fixed_addr, GetLastError());
  return p;
}

void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  return MmapFixedNoReserve(fixed_addr, size);
}

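// Despite the name, this does not change the protection of an existing
// mapping: it reserves and commits the range with PAGE_NOACCESS, so any
// access to it will fault.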
void *Mprotect(uptr fixed_addr, uptr size) {
  return VirtualAlloc((LPVOID)fixed_addr, size,
                      MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
}

void FlushUnneededShadowMemory(uptr addr, uptr size) {
  // This is almost useless on 32-bit targets.
  // FIXME: add a madvise() analog when we move to 64 bits.
}

bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  // FIXME: shall we do anything here on Windows?
  return true;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

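// Environment variables are served from a small static cache (see GetEnv
// below).  kMaxEnvValueLength matches the documented upper bound on the size
// of an environment variable value on Windows.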
static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

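// GetEnvironmentVariableA() writes into a caller-supplied buffer, so each
// value is copied into the static cache above and a pointer into that cache
// is returned; at most kEnvVariables distinct names are supported.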
const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}

const char *GetPwd() {
  UNIMPLEMENTED();
}

u32 GetUid() {
  UNIMPLEMENTED();
}

void DumpProcessMap() {
  UNIMPLEMENTED();
}

void DisableCoreDumper() {
  UNIMPLEMENTED();
}

void ReExec() {
  UNIMPLEMENTED();
}

void PrepareForSandboxing() {
  // Nothing here for now.
}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

void SleepForSeconds(int seconds) {
  Sleep(seconds * 1000);
}

void SleepForMillis(int millis) {
  Sleep(millis);
}

u64 NanoTime() {
  return 0;
}

void Abort() {
  abort();
  _exit(-1);  // abort is not NORETURN on Windows.
}

#ifndef SANITIZER_GO
int Atexit(void (*function)(void)) {
  return atexit(function);
}
#endif

// ------------------ sanitizer_libc.h
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
                   int fd, u64 offset) {
  UNIMPLEMENTED();
}

uptr internal_munmap(void *addr, uptr length) {
  UNIMPLEMENTED();
}

uptr internal_close(fd_t fd) {
  UNIMPLEMENTED();
}

int internal_isatty(fd_t fd) {
  return _isatty(fd);
}

uptr internal_open(const char *filename, int flags) {
  UNIMPLEMENTED();
}

uptr internal_open(const char *filename, int flags, u32 mode) {
  UNIMPLEMENTED();
}

uptr OpenFile(const char *filename, bool write) {
  UNIMPLEMENTED();
}

uptr internal_read(fd_t fd, void *buf, uptr count) {
  UNIMPLEMENTED();
}

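// Only writes to stderr are supported; they go through the handle returned by
// GetStdHandle(STD_ERROR_HANDLE).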
uptr internal_write(fd_t fd, const void *buf, uptr count) {
  if (fd != kStderrFd)
    UNIMPLEMENTED();
  HANDLE err = GetStdHandle(STD_ERROR_HANDLE);
  if (err == 0)
    return 0;  // FIXME: this might not work on some apps.
  DWORD ret;
  if (!WriteFile(err, buf, count, &ret, 0))
    return 0;
  return ret;
}

uptr internal_stat(const char *path, void *buf) {
  UNIMPLEMENTED();
}

uptr internal_lstat(const char *path, void *buf) {
  UNIMPLEMENTED();
}

uptr internal_fstat(fd_t fd, void *buf) {
  UNIMPLEMENTED();
}

uptr internal_filesize(fd_t fd) {
  UNIMPLEMENTED();
}

uptr internal_dup2(int oldfd, int newfd) {
  UNIMPLEMENTED();
}

uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
  UNIMPLEMENTED();
}

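// Sleep(0) yields the remainder of the current time slice to another ready
// thread, which is the closest analog of sched_yield().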
uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  _exit(exitcode);
}

// ---------------------- BlockingMutex ---------------- {{{1
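// owner_ encodes the state of the mutex: LOCK_UNINITIALIZED means the
// constructor has not run yet, LOCK_READY means initialized but unlocked, and
// any other value is the id of the thread currently holding the lock.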
const uptr LOCK_UNINITIALIZED = 0;
const uptr LOCK_READY = (uptr)-1;

BlockingMutex::BlockingMutex(LinkerInitialized li) {
  // FIXME: see comments in BlockingMutex::Lock() for the details.
  CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);

  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  owner_ = LOCK_READY;
}

BlockingMutex::BlockingMutex() {
  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  owner_ = LOCK_READY;
}

void BlockingMutex::Lock() {
  if (owner_ == LOCK_UNINITIALIZED) {
    // FIXME: hm, global BlockingMutex objects are not initialized?!?
    // This might be a side effect of the clang+cl+link Frankenbuild...
    new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));

    // FIXME: If it turns out the linker doesn't invoke our
    // constructors, we should probably manually Lock/Unlock all the global
    // locks while we're starting in one thread to avoid double-init races.
  }
  EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  CHECK_EQ(owner_, LOCK_READY);
  owner_ = GetThreadSelf();
}

void BlockingMutex::Unlock() {
  CHECK_EQ(owner_, GetThreadSelf());
  owner_ = LOCK_READY;
  LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}

void BlockingMutex::CheckLocked() {
  CHECK_EQ(owner_, GetThreadSelf());
}

uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

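// TLS bounds are not tracked on Windows yet, so only the stack range is
// reported; the TLS range is left empty.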
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#ifdef SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#else
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

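// The unwind is done with CaptureStackBackTrace(); the captured frames are
// copied into the StackTrace buffer, dropping everything above the
// caller-provided pc so that sanitizer-internal frames are not reported.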
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
                   uptr stack_top, uptr stack_bottom, bool fast) {
  (void)fast;
  (void)stack_top;
  (void)stack_bottom;
  stack->max_size = max_s;
  void *tmp[kStackTraceMax];

  // FIXME: CaptureStackBackTrace might be too slow for us.
  // FIXME: Compare with StackWalk64.
  // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
  uptr cs_ret = CaptureStackBackTrace(1, stack->max_size, tmp, 0);
  uptr offset = 0;
  // Skip the RTL frames by searching for the PC in the stacktrace.
  // FIXME: this doesn't work well for the malloc/free stacks yet.
  for (uptr i = 0; i < cs_ret; i++) {
    if (pc != (uptr)tmp[i])
      continue;
    offset = i;
    break;
  }

  stack->size = cs_ret - offset;
  for (uptr i = 0; i < stack->size; i++)
    stack->trace[i] = (uptr)tmp[i + offset];
}

}  // namespace __sanitizer

#endif  // SANITIZER_WINDOWS