//===-- tsan_fd.cc --------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_fd.h"
#include "tsan_rtl.h"
#include <sanitizer_common/sanitizer_atomic.h>

namespace __tsan {

const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;

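// FdSync is the synchronization object associated with an fd. The special
// refcount value (u64)-1 (assigned in FdInit to the static globsync,
// filesync and socksync objects below) marks "immortal" syncs that
// ref/unref never modify or free.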
struct FdSync {
  atomic_uint64_t rc;
};

struct FdDesc {
  FdSync *sync;
  int creation_tid;
  u32 creation_stack;
};

struct FdContext {
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;
  FdSync filesync;
  FdSync socksync;
  u64 connectsync;
};

static FdContext fdctx;

static bool bogusfd(int fd) {
  // Apparently a bogus fd value.
  return fd < 0 || fd >= kTableSize;
}

static FdSync *allocsync(ThreadState *thr, uptr pc) {
  FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
      false);
  atomic_store(&s->rc, 1, memory_order_relaxed);
  return s;
}

static FdSync *ref(FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
  return s;
}

static void unref(ThreadState *thr, uptr pc, FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
      CHECK_NE(s, &fdctx.globsync);
      CHECK_NE(s, &fdctx.filesync);
      CHECK_NE(s, &fdctx.socksync);
      user_free(thr, pc, s, false);
    }
  }
}
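// Reference-counting protocol (illustrative summary): allocsync() returns a
// sync with rc == 1 owned by the caller; each fd that stores the sync takes
// an extra ref(); init()/FdClose() drop references via unref(), and whoever
// drops the last reference frees the object. Immortal syncs
// (rc == (u64)-1) opt out of the whole protocol.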

static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_GE(fd, 0);
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    // We need this to reside in user memory to properly catch races on it.
    void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
    internal_memset(p, 0, size);
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      user_free(thr, pc, p, false);
  }
  return &((FdDesc*)l1)[fd % kTableSizeL2];  // NOLINT
}
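// fddesc() indexing example (illustrative): with kTableSizeL2 == 1024,
// fd 1025 resolves to the second-level table tab[1025 / 1024] == tab[1],
// slot 1025 % 1024 == 1 within it. Second-level tables are allocated
// lazily and published with a compare-exchange, so concurrent allocators
// race benignly and the loser frees its copy.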

// s must be already ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
    bool write = true) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
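  // flags()->io_sync selects how much synchronization IO operations imply:
  // 0 - none (the sync object is dropped), 1 - per-fd synchronization,
  // 2 - one global sync object shared by all fds.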
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  if (write) {
    // To catch races between fd usage and open.
    MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
  } else {
    // See the dup-related comment in FdClose.
    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  }
}

void FdInit() {
  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}

void FdOnFork(ThreadState *thr, uptr pc) {
  // On fork() we need to reset all fd descriptors, because the child is
  // going to close all of them, and that would cause races between the
  // previous reads/writes and the closes.
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
      FdDesc *d = &tab[l2];
      MemoryResetRange(thr, pc, (uptr)d, 8);
    }
  }
}

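// Maps an address inside the descriptor table back to the fd it belongs to,
// so that a report can say which file descriptor the race is on, along with
// the thread and stack that created it.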
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
      FdDesc *d = &tab[l2];
      *fd = l1 * kTableSizeL2 + l2;
      *tid = d->creation_tid;
      *stack = d->creation_stack;
      return true;
    }
  }
  return false;
}

void FdAcquire(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  if (s)
    Acquire(thr, pc, (uptr)s);
}

void FdRelease(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  if (s)
    Release(thr, pc, (uptr)s);
}
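// Illustrative interceptor call pattern (a sketch, not verbatim interceptor
// code): a write()-like interceptor calls FdRelease(thr, pc, fd) before the
// real syscall, and a read()-like interceptor calls FdAcquire(thr, pc, fd)
// after it, so everything done before the write happens-before everything
// done after the corresponding read.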

void FdAccess(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}

void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  if (write) {
    // To catch races between fd usage and close.
    MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
  } else {
    // This path is used only by dup2/dup3 calls.
    // We do a read instead of a write because there are a number of
    // legitimate cases where a write would lead to false positives:
    // 1. Some software dups a closed pipe in place of a socket before closing
    //    the socket (actually, to prevent races).
    // 2. Some daemons dup /dev/null in place of stdin/stdout.
    // On the other hand, we have not seen cases where a write here catches
    // real bugs.
    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  }
  // We need to clear the descriptor, because if we miss an interceptor for
  // some call that creates an fd, we would otherwise hit false positives.
  MemoryResetRange(thr, pc, (uptr)d, 8);
  unref(thr, pc, d->sync);
  d->sync = 0;
  d->creation_tid = 0;
  d->creation_stack = 0;
}

void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.filesync);
}

void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
  if (bogusfd(oldfd) || bogusfd(newfd))
    return;
  // Ignore the case when the user dups a not-yet-connected socket.
  FdDesc *od = fddesc(thr, pc, oldfd);
  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
  FdClose(thr, pc, newfd, write);
  init(thr, pc, newfd, ref(od->sync), write);
}
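// After FdDup, newfd shares oldfd's FdSync (note the ref(od->sync) above),
// so under io_sync=1 an FdRelease on either of the two descriptors is
// observed by an FdAcquire on the other.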

void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
  DPrintf("#%d: FdPipeCreate(%d, %d)\n", thr->tid, rfd, wfd);
  FdSync *s = allocsync(thr, pc);
  init(thr, pc, rfd, ref(s));
  init(thr, pc, wfd, ref(s));
  unref(thr, pc, s);
}
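// Both pipe ends share a single FdSync: allocsync() creates it with rc == 1,
// each end takes a reference, and the creator's reference is dropped. This
// is what lets a Release on the write end pair with an Acquire on the read
// end, modeling the happens-before edge through the pipe.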

void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync(thr, pc));
}

void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}

void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}

void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync(thr, pc));
}

void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // It can be a UDP socket.
  init(thr, pc, fd, &fdctx.socksync);
}

void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Acquire(thr, pc, (uptr)&fdctx.connectsync);
  init(thr, pc, newfd, &fdctx.socksync);
}

void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Release(thr, pc, (uptr)&fdctx.connectsync);
}
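// The Release in FdSocketConnecting pairs with the Acquire in FdSocketAccept
// on the shared fdctx.connectsync address: everything the connecting thread
// did before connect() happens-before what the accepting thread does after
// accept().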

void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.socksync);
}

uptr File2addr(const char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

uptr Dir2addr(const char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}
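// File2addr/Dir2addr map every path to a single static address (one for
// files, one for directories), so synchronization through the filesystem is
// modeled conservatively: operations on any two files are treated as if
// they were on the same object.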

}  // namespace __tsan