1//===-- tsan_clock_test.cc ------------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
13#include "tsan_clock.h"
14#include "tsan_rtl.h"
15#include "gtest/gtest.h"
16#include <time.h>
17
18namespace __tsan {
19
// Clock-chunk cache shared by all tests in this file; SyncClock::Reset()
// returns released chunks here instead of freeing them.
ClockCache cache;
21
22TEST(Clock, VectorBasic) {
23  ThreadClock clk(0);
24  ASSERT_EQ(clk.size(), 1U);
25  clk.tick();
26  ASSERT_EQ(clk.size(), 1U);
27  ASSERT_EQ(clk.get(0), 1U);
28  clk.set(3, clk.get(3) + 1);
29  ASSERT_EQ(clk.size(), 4U);
30  ASSERT_EQ(clk.get(0), 1U);
31  ASSERT_EQ(clk.get(1), 0U);
32  ASSERT_EQ(clk.get(2), 0U);
33  ASSERT_EQ(clk.get(3), 1U);
34  clk.set(3, clk.get(3) + 1);
35  ASSERT_EQ(clk.get(3), 2U);
36}
37
38TEST(Clock, ChunkedBasic) {
39  ThreadClock vector(0);
40  SyncClock chunked;
41  ASSERT_EQ(vector.size(), 1U);
42  ASSERT_EQ(chunked.size(), 0U);
43  vector.acquire(&cache, &chunked);
44  ASSERT_EQ(vector.size(), 1U);
45  ASSERT_EQ(chunked.size(), 0U);
46  vector.release(&cache, &chunked);
47  ASSERT_EQ(vector.size(), 1U);
48  ASSERT_EQ(chunked.size(), 1U);
49  vector.acq_rel(&cache, &chunked);
50  ASSERT_EQ(vector.size(), 1U);
51  ASSERT_EQ(chunked.size(), 1U);
52  chunked.Reset(&cache);
53}
54
55TEST(Clock, AcquireRelease) {
56  ThreadClock vector1(100);
57  vector1.tick();
58  SyncClock chunked;
59  vector1.release(&cache, &chunked);
60  ASSERT_EQ(chunked.size(), 101U);
61  ThreadClock vector2(0);
62  vector2.acquire(&cache, &chunked);
63  ASSERT_EQ(vector2.size(), 101U);
64  ASSERT_EQ(vector2.get(0), 0U);
65  ASSERT_EQ(vector2.get(1), 0U);
66  ASSERT_EQ(vector2.get(99), 0U);
67  ASSERT_EQ(vector2.get(100), 1U);
68  chunked.Reset(&cache);
69}
70
71TEST(Clock, RepeatedAcquire) {
72  ThreadClock thr1(1);
73  thr1.tick();
74  ThreadClock thr2(2);
75  thr2.tick();
76
77  SyncClock sync;
78  thr1.ReleaseStore(&cache, &sync);
79
80  thr2.acquire(&cache, &sync);
81  thr2.acquire(&cache, &sync);
82
83  sync.Reset(&cache);
84}
85
86TEST(Clock, ManyThreads) {
87  SyncClock chunked;
88  for (unsigned i = 0; i < 100; i++) {
89    ThreadClock vector(0);
90    vector.tick();
91    vector.set(i, 1);
92    vector.release(&cache, &chunked);
93    ASSERT_EQ(i + 1, chunked.size());
94    vector.acquire(&cache, &chunked);
95    ASSERT_EQ(i + 1, vector.size());
96  }
97
98  for (unsigned i = 0; i < 100; i++)
99    ASSERT_EQ(1U, chunked.get(i));
100
101  ThreadClock vector(1);
102  vector.acquire(&cache, &chunked);
103  ASSERT_EQ(100U, vector.size());
104  for (unsigned i = 0; i < 100; i++)
105    ASSERT_EQ(1U, vector.get(i));
106
107  chunked.Reset(&cache);
108}
109
110TEST(Clock, DifferentSizes) {
111  {
112    ThreadClock vector1(10);
113    vector1.tick();
114    ThreadClock vector2(20);
115    vector2.tick();
116    {
117      SyncClock chunked;
118      vector1.release(&cache, &chunked);
119      ASSERT_EQ(chunked.size(), 11U);
120      vector2.release(&cache, &chunked);
121      ASSERT_EQ(chunked.size(), 21U);
122      chunked.Reset(&cache);
123    }
124    {
125      SyncClock chunked;
126      vector2.release(&cache, &chunked);
127      ASSERT_EQ(chunked.size(), 21U);
128      vector1.release(&cache, &chunked);
129      ASSERT_EQ(chunked.size(), 21U);
130      chunked.Reset(&cache);
131    }
132    {
133      SyncClock chunked;
134      vector1.release(&cache, &chunked);
135      vector2.acquire(&cache, &chunked);
136      ASSERT_EQ(vector2.size(), 21U);
137      chunked.Reset(&cache);
138    }
139    {
140      SyncClock chunked;
141      vector2.release(&cache, &chunked);
142      vector1.acquire(&cache, &chunked);
143      ASSERT_EQ(vector1.size(), 21U);
144      chunked.Reset(&cache);
145    }
146  }
147}
148
149TEST(Clock, Growth) {
150  {
151    ThreadClock vector(10);
152    vector.tick();
153    vector.set(5, 42);
154    SyncClock sync;
155    vector.release(&cache, &sync);
156    ASSERT_EQ(sync.size(), 11U);
157    ASSERT_EQ(sync.get(0), 0ULL);
158    ASSERT_EQ(sync.get(1), 0ULL);
159    ASSERT_EQ(sync.get(5), 42ULL);
160    ASSERT_EQ(sync.get(9), 0ULL);
161    ASSERT_EQ(sync.get(10), 1ULL);
162    sync.Reset(&cache);
163  }
164  {
165    ThreadClock vector1(10);
166    vector1.tick();
167    ThreadClock vector2(20);
168    vector2.tick();
169    SyncClock sync;
170    vector1.release(&cache, &sync);
171    vector2.release(&cache, &sync);
172    ASSERT_EQ(sync.size(), 21U);
173    ASSERT_EQ(sync.get(0), 0ULL);
174    ASSERT_EQ(sync.get(10), 1ULL);
175    ASSERT_EQ(sync.get(19), 0ULL);
176    ASSERT_EQ(sync.get(20), 1ULL);
177    sync.Reset(&cache);
178  }
179  {
180    ThreadClock vector(100);
181    vector.tick();
182    vector.set(5, 42);
183    vector.set(90, 84);
184    SyncClock sync;
185    vector.release(&cache, &sync);
186    ASSERT_EQ(sync.size(), 101U);
187    ASSERT_EQ(sync.get(0), 0ULL);
188    ASSERT_EQ(sync.get(1), 0ULL);
189    ASSERT_EQ(sync.get(5), 42ULL);
190    ASSERT_EQ(sync.get(60), 0ULL);
191    ASSERT_EQ(sync.get(70), 0ULL);
192    ASSERT_EQ(sync.get(90), 84ULL);
193    ASSERT_EQ(sync.get(99), 0ULL);
194    ASSERT_EQ(sync.get(100), 1ULL);
195    sync.Reset(&cache);
196  }
197  {
198    ThreadClock vector1(10);
199    vector1.tick();
200    ThreadClock vector2(100);
201    vector2.tick();
202    SyncClock sync;
203    vector1.release(&cache, &sync);
204    vector2.release(&cache, &sync);
205    ASSERT_EQ(sync.size(), 101U);
206    ASSERT_EQ(sync.get(0), 0ULL);
207    ASSERT_EQ(sync.get(10), 1ULL);
208    ASSERT_EQ(sync.get(99), 0ULL);
209    ASSERT_EQ(sync.get(100), 1ULL);
210    sync.Reset(&cache);
211  }
212}
213
// Fuzzer dimensions: number of simulated threads and sync objects.
// Kept tiny so the random walk revisits the same objects often.
const uptr kThreads = 4;
const uptr kClocks = 4;
216
217// SimpleSyncClock and SimpleThreadClock implement the same thing as
218// SyncClock and ThreadClock, but in a very simple way.
219struct SimpleSyncClock {
220  u64 clock[kThreads];
221  uptr size;
222
223  SimpleSyncClock() {
224    Reset();
225  }
226
227  void Reset() {
228    size = 0;
229    for (uptr i = 0; i < kThreads; i++)
230      clock[i] = 0;
231  }
232
233  bool verify(const SyncClock *other) const {
234    for (uptr i = 0; i < min(size, other->size()); i++) {
235      if (clock[i] != other->get(i))
236        return false;
237    }
238    for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
239      if (i < size && clock[i] != 0)
240        return false;
241      if (i < other->size() && other->get(i) != 0)
242        return false;
243    }
244    return true;
245  }
246};
247
248struct SimpleThreadClock {
249  u64 clock[kThreads];
250  uptr size;
251  unsigned tid;
252
253  explicit SimpleThreadClock(unsigned tid) {
254    this->tid = tid;
255    size = tid + 1;
256    for (uptr i = 0; i < kThreads; i++)
257      clock[i] = 0;
258  }
259
260  void tick() {
261    clock[tid]++;
262  }
263
264  void acquire(const SimpleSyncClock *src) {
265    if (size < src->size)
266      size = src->size;
267    for (uptr i = 0; i < kThreads; i++)
268      clock[i] = max(clock[i], src->clock[i]);
269  }
270
271  void release(SimpleSyncClock *dst) const {
272    if (dst->size < size)
273      dst->size = size;
274    for (uptr i = 0; i < kThreads; i++)
275      dst->clock[i] = max(dst->clock[i], clock[i]);
276  }
277
278  void acq_rel(SimpleSyncClock *dst) {
279    acquire(dst);
280    release(dst);
281  }
282
283  void ReleaseStore(SimpleSyncClock *dst) const {
284    if (dst->size < size)
285      dst->size = size;
286    for (uptr i = 0; i < kThreads; i++)
287      dst->clock[i] = clock[i];
288  }
289
290  bool verify(const ThreadClock *other) const {
291    for (uptr i = 0; i < min(size, other->size()); i++) {
292      if (clock[i] != other->get(i))
293        return false;
294    }
295    for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
296      if (i < size && clock[i] != 0)
297        return false;
298      if (i < other->size() && other->get(i) != 0)
299        return false;
300    }
301    return true;
302  }
303};
304
305static bool ClockFuzzer(bool printing) {
306  // Create kThreads thread clocks.
307  SimpleThreadClock *thr0[kThreads];
308  ThreadClock *thr1[kThreads];
309  unsigned reused[kThreads];
310  for (unsigned i = 0; i < kThreads; i++) {
311    reused[i] = 0;
312    thr0[i] = new SimpleThreadClock(i);
313    thr1[i] = new ThreadClock(i, reused[i]);
314  }
315
316  // Create kClocks sync clocks.
317  SimpleSyncClock *sync0[kClocks];
318  SyncClock *sync1[kClocks];
319  for (unsigned i = 0; i < kClocks; i++) {
320    sync0[i] = new SimpleSyncClock();
321    sync1[i] = new SyncClock();
322  }
323
324  // Do N random operations (acquire, release, etc) and compare results
325  // for SimpleThread/SyncClock and real Thread/SyncClock.
326  for (int i = 0; i < 10000; i++) {
327    unsigned tid = rand() % kThreads;
328    unsigned cid = rand() % kClocks;
329    thr0[tid]->tick();
330    thr1[tid]->tick();
331
332    switch (rand() % 6) {
333    case 0:
334      if (printing)
335        printf("acquire thr%d <- clk%d\n", tid, cid);
336      thr0[tid]->acquire(sync0[cid]);
337      thr1[tid]->acquire(&cache, sync1[cid]);
338      break;
339    case 1:
340      if (printing)
341        printf("release thr%d -> clk%d\n", tid, cid);
342      thr0[tid]->release(sync0[cid]);
343      thr1[tid]->release(&cache, sync1[cid]);
344      break;
345    case 2:
346      if (printing)
347        printf("acq_rel thr%d <> clk%d\n", tid, cid);
348      thr0[tid]->acq_rel(sync0[cid]);
349      thr1[tid]->acq_rel(&cache, sync1[cid]);
350      break;
351    case 3:
352      if (printing)
353        printf("rel_str thr%d >> clk%d\n", tid, cid);
354      thr0[tid]->ReleaseStore(sync0[cid]);
355      thr1[tid]->ReleaseStore(&cache, sync1[cid]);
356      break;
357    case 4:
358      if (printing)
359        printf("reset clk%d\n", cid);
360      sync0[cid]->Reset();
361      sync1[cid]->Reset(&cache);
362      break;
363    case 5:
364      if (printing)
365        printf("reset thr%d\n", tid);
366      u64 epoch = thr0[tid]->clock[tid] + 1;
367      reused[tid]++;
368      delete thr0[tid];
369      thr0[tid] = new SimpleThreadClock(tid);
370      thr0[tid]->clock[tid] = epoch;
371      delete thr1[tid];
372      thr1[tid] = new ThreadClock(tid, reused[tid]);
373      thr1[tid]->set(epoch);
374      break;
375    }
376
377    if (printing) {
378      for (unsigned i = 0; i < kThreads; i++) {
379        printf("thr%d: ", i);
380        thr1[i]->DebugDump(printf);
381        printf("\n");
382      }
383      for (unsigned i = 0; i < kClocks; i++) {
384        printf("clk%d: ", i);
385        sync1[i]->DebugDump(printf);
386        printf("\n");
387      }
388
389      printf("\n");
390    }
391
392    if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) {
393      if (!printing)
394        return false;
395      printf("differs with model:\n");
396      for (unsigned i = 0; i < kThreads; i++) {
397        printf("thr%d: clock=[", i);
398        for (uptr j = 0; j < thr0[i]->size; j++)
399          printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]);
400        printf("]\n");
401      }
402      for (unsigned i = 0; i < kClocks; i++) {
403        printf("clk%d: clock=[", i);
404        for (uptr j = 0; j < sync0[i]->size; j++)
405          printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]);
406        printf("]\n");
407      }
408      return false;
409    }
410  }
411
412  for (unsigned i = 0; i < kClocks; i++) {
413    sync1[i]->Reset(&cache);
414  }
415  return true;
416}
417
418TEST(Clock, Fuzzer) {
419  timespec ts;
420  clock_gettime(CLOCK_MONOTONIC, &ts);
421  int seed = ts.tv_sec + ts.tv_nsec;
422  printf("seed=%d\n", seed);
423  srand(seed);
424  if (!ClockFuzzer(false)) {
425    // Redo the test with the same seed, but logging operations.
426    srand(seed);
427    ClockFuzzer(true);
428    ASSERT_TRUE(false);
429  }
430}
431
432}  // namespace __tsan
433