/* ThreadSanitizer
 * Copyright (c) 2011, Google Inc. All rights reserved.
 * Author: Dmitry Vyukov (dvyukov)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ts_util.h"
#include "ts_atomic_int.h"


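// Human-readable name of a memory ordering constraint, for reports/debugging.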
char const* tsan_atomic_to_str(tsan_memory_order mo) {
  switch (mo) {
    case tsan_memory_order_invalid: return "invalid";
    case tsan_memory_order_natomic: return "natomic";
    case tsan_memory_order_relaxed: return "relaxed";
    case tsan_memory_order_consume: return "consume";
    case tsan_memory_order_acquire: return "acquire";
    case tsan_memory_order_release: return "release";
    case tsan_memory_order_acq_rel: return "acq_rel";
    case tsan_memory_order_seq_cst: return "seq_cst";
    default: return "-------";
  }
}


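// Human-readable name of an atomic operation, for reports/debugging.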
char const* tsan_atomic_to_str(tsan_atomic_op op) {
  switch (op) {
    case tsan_atomic_op_invalid: return "invalid";
    case tsan_atomic_op_fence: return "fence";
    case tsan_atomic_op_load: return "load";
    case tsan_atomic_op_store: return "store";
    case tsan_atomic_op_exchange: return "exchange";
    case tsan_atomic_op_fetch_add: return "fetch_add";
    case tsan_atomic_op_fetch_sub: return "fetch_sub";
    case tsan_atomic_op_fetch_and: return "fetch_and";
    case tsan_atomic_op_fetch_xor: return "fetch_xor";
    case tsan_atomic_op_fetch_or: return "fetch_or";
    case tsan_atomic_op_compare_exchange_weak: return "compare_exchange_weak";
    case tsan_atomic_op_compare_exchange_strong:
      return "compare_exchange_strong";
    default: return "---";
  }
}


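// Memory orders are bit flags, so testing membership in a set of orders is a
// single bitwise AND. True if `mo` has acquire semantics.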
bool tsan_atomic_is_acquire(tsan_memory_order mo) {
  return !!(mo & (tsan_memory_order_consume
      | tsan_memory_order_acquire
      | tsan_memory_order_acq_rel
      | tsan_memory_order_seq_cst));
}


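// True if `mo` has release semantics.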
bool tsan_atomic_is_release(tsan_memory_order mo) {
  return !!(mo & (tsan_memory_order_release
      | tsan_memory_order_acq_rel
      | tsan_memory_order_seq_cst));
}


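// Atomic ops are bit flags as well. True if `op` is a read-modify-write
// operation (everything except fence, load and store).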
bool tsan_atomic_is_rmw(tsan_atomic_op op) {
  return !!(op & (tsan_atomic_op_exchange
      | tsan_atomic_op_fetch_add
      | tsan_atomic_op_fetch_sub
      | tsan_atomic_op_fetch_and
      | tsan_atomic_op_fetch_xor
      | tsan_atomic_op_fetch_or
      | tsan_atomic_op_compare_exchange_weak
      | tsan_atomic_op_compare_exchange_strong));
}


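// Validates an atomic operation before it is executed: the access size must
// be a supported power of two, the address must be naturally aligned, and the
// memory order must be one the C++11 memory model allows for the operation
// (e.g. a load cannot be release, a store cannot be acquire).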
void tsan_atomic_verify(tsan_atomic_op op,
                        tsan_memory_order mo,
                        tsan_memory_order fail_mo,
                        size_t size,
                        void volatile* a) {
  CHECK(size == 1 || size == 2 || size == 4 || size == 8);
  CHECK((((uintptr_t)a) % size) == 0);

  if (op == tsan_atomic_op_load) {
    CHECK(mo & (tsan_memory_order_natomic
        | tsan_memory_order_relaxed
        | tsan_memory_order_consume
        | tsan_memory_order_acquire
        | tsan_memory_order_seq_cst));
  } else if (op == tsan_atomic_op_store) {
    CHECK(mo & (tsan_memory_order_natomic
        | tsan_memory_order_relaxed
        | tsan_memory_order_release
        | tsan_memory_order_seq_cst));
  } else if (op == tsan_atomic_op_fence) {
    CHECK(mo & (tsan_memory_order_consume
        | tsan_memory_order_acquire
        | tsan_memory_order_release
        | tsan_memory_order_acq_rel
        | tsan_memory_order_seq_cst));
  } else if (op & (tsan_atomic_op_exchange
        | tsan_atomic_op_fetch_add
        | tsan_atomic_op_fetch_sub
        | tsan_atomic_op_fetch_and
        | tsan_atomic_op_fetch_xor
        | tsan_atomic_op_fetch_or
        | tsan_atomic_op_compare_exchange_weak
        | tsan_atomic_op_compare_exchange_strong)) {
    CHECK(mo & (tsan_memory_order_relaxed
        | tsan_memory_order_consume
        | tsan_memory_order_acquire
        | tsan_memory_order_release
        | tsan_memory_order_acq_rel
        | tsan_memory_order_seq_cst));
  } else {
    CHECK("unknown tsan_atomic_op" == 0);
  }
}


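// 32-bit and 64-bit x86 share most of the code paths below; fold both into a
// single __x86__ macro.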
#if defined(__i386__)
# define __x86__
#elif defined(__x86_64__)
# define __x86__
#endif

#if defined(__GNUC__) && defined(__x86__)
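// Executes atomic operation `op` of `size` bytes on address `a`.
// Returns the operation's result; additionally, *prev receives the value that
// was in memory immediately before the operation and *newv the value left in
// memory after it, so the caller can observe the transition.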
uint64_t tsan_atomic_do_op(tsan_atomic_op op,
                           tsan_memory_order mo,
                           tsan_memory_order fail_mo,
                           size_t size,
                           void volatile* a,
                           uint64_t v,
                           uint64_t cmp,
                           uint64_t* newv,
                           uint64_t* prev) {
  *newv = v;
  if (op != tsan_atomic_op_fence) {
    if (size == 1) {
      *prev = *(uint8_t volatile*)a;
    } else if (size == 2) {
      *prev = *(uint16_t volatile*)a;
    } else if (size == 4) {
      *prev = *(uint32_t volatile*)a;
    } else if (size == 8) {
      *prev = *(uint64_t volatile*)a;
    }
  }

  if (op == tsan_atomic_op_load) {
    return *prev;

  } else if (op == tsan_atomic_op_store) {
    if (mo == tsan_memory_order_seq_cst) {
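      // XCHG with a memory operand is implicitly LOCKed and acts as a full
      // barrier, which is exactly what a seq_cst store requires on x86
      // (the alternative is a plain MOV followed by MFENCE).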
      if (size == 1) {
        uint8_t vv = (uint8_t)v;
        __asm__ __volatile__ ("xchgb %1, %0"
            : "=r" (vv) : "m" (*(uint8_t volatile*)a), "0" (vv));
        *prev = vv;
      } else if (size == 2) {
        uint16_t vv = (uint16_t)v;
        __asm__ __volatile__ ("xchgw %1, %0"
            : "=r" (vv) : "m" (*(uint16_t volatile*)a), "0" (vv));
        *prev = vv;
      } else if (size == 4) {
        uint32_t vv = (uint32_t)v;
        __asm__ __volatile__ ("xchgl %1, %0"
            : "=r" (vv) : "m" (*(uint32_t volatile*)a), "0" (vv));
        *prev = vv;
      } else if (size == 8) {
#ifdef __x86_64__
        uint64_t vv = (uint64_t)v;
        __asm__ __volatile__ ("xchgq %1, %0"
            : "=r" (vv) : "m" (*(uint64_t volatile*)a), "0" (vv));
        *prev = vv;
#else
        // There is no 64-bit XCHG on 32-bit x86: emulate the store with a
        // CAS loop (a sketch assuming __sync_val_compare_and_swap is lowered
        // to CMPXCHG8B for 64-bit operands on this target).
        uint64_t cmpv = *(uint64_t volatile*)a;
        for (;;) {
          uint64_t prevv = __sync_val_compare_and_swap(
              (uint64_t volatile*)a, cmpv, v);
          if (prevv == cmpv)
            break;
          cmpv = prevv;
        }
        *prev = cmpv;
#endif
      }
    } else {
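      // For orders weaker than seq_cst no fence is needed on x86 (TSO):
      // naturally-aligned plain stores of 1/2/4 bytes (and 8 bytes on
      // x86_64) are already atomic.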
      if (size == 1) {
        *(uint8_t volatile*)a = (uint8_t)v;
      } else if (size == 2) {
        *(uint16_t volatile*)a = (uint16_t)v;
      } else if (size == 4) {
        *(uint32_t volatile*)a = (uint32_t)v;
      } else if (size == 8) {
        *(uint64_t volatile*)a = v;
      }
    }
    return 0;

  } else if (op == tsan_atomic_op_exchange) {
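    // XCHG is implicitly LOCKed, so one instruction yields a full-barrier
    // atomic swap, strong enough for any requested memory order.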
    if (size == 1) {
      uint8_t vv = (uint8_t)v;
      __asm__ __volatile__ ("xchgb %1, %0"
          : "=r" (vv) : "m" (*(uint8_t volatile*)a), "0" (vv));
      *prev = vv;
      return vv;
    } else if (size == 2) {
      uint16_t vv = (uint16_t)v;
      __asm__ __volatile__ ("xchgw %1, %0"
          : "=r" (vv) : "m" (*(uint16_t volatile*)a), "0" (vv));
      *prev = vv;
      return vv;
    } else if (size == 4) {
      uint32_t vv = (uint32_t)v;
      __asm__ __volatile__ ("xchgl %1, %0"
          : "=r" (vv) : "m" (*(uint32_t volatile*)a), "0" (vv));
      *prev = vv;
      return vv;
    } else if (size == 8) {
#ifdef __x86_64__
      uint64_t vv = (uint64_t)v;
      __asm__ __volatile__ ("xchgq %1, %0"
          : "=r" (vv) : "m" (*(uint64_t volatile*)a), "0" (vv));
      *prev = vv;
      return vv;
#else
      // No 64-bit XCHG on 32-bit x86: emulate the exchange with a CAS loop
      // (same CMPXCHG8B-based assumption as in the store path above).
      uint64_t cmpv = *(uint64_t volatile*)a;
      for (;;) {
        uint64_t prevv = __sync_val_compare_and_swap(
            (uint64_t volatile*)a, cmpv, v);
        if (prevv == cmpv)
          break;
        cmpv = prevv;
      }
      *prev = cmpv;
      return cmpv;
#endif
    }

  } else if (op == tsan_atomic_op_fetch_add) {
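    // The __sync_fetch_and_* builtins compile to LOCK-prefixed RMW
    // instructions on x86 and act as full barriers, satisfying every memory
    // order up to seq_cst. The same holds for the fetch_sub/and/xor/or
    // branches below.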
    if (size == 1) {
      uint8_t prevv = __sync_fetch_and_add((uint8_t volatile*)a, (uint8_t)v);
      *prev = prevv;
      // Truncate to the access size so *newv matches what memory now holds
      // (8/16-bit operands are promoted to int and would not wrap otherwise).
      *newv = (uint8_t)(prevv + (uint8_t)v);
      return prevv;
    } else if (size == 2) {
      uint16_t prevv = __sync_fetch_and_add(
          (uint16_t volatile*)a, (uint16_t)v);
      *prev = prevv;
      *newv = (uint16_t)(prevv + (uint16_t)v);
      return prevv;
    } else if (size == 4) {
      uint32_t prevv = __sync_fetch_and_add(
          (uint32_t volatile*)a, (uint32_t)v);
      *prev = prevv;
      *newv = prevv + (uint32_t)v;
      return prevv;
    } else if (size == 8) {
      uint64_t prevv = __sync_fetch_and_add(
          (uint64_t volatile*)a, (uint64_t)v);
      *prev = prevv;
      *newv = prevv + v;
      return prevv;
    }

  } else if (op == tsan_atomic_op_fetch_sub) {
    if (size == 1) {
      uint8_t prevv = __sync_fetch_and_sub(
          (uint8_t volatile*)a, (uint8_t)v);
      *prev = prevv;
      *newv = (uint8_t)(prevv - (uint8_t)v);
      return prevv;
    } else if (size == 2) {
      uint16_t prevv = __sync_fetch_and_sub(
          (uint16_t volatile*)a, (uint16_t)v);
      *prev = prevv;
      *newv = (uint16_t)(prevv - (uint16_t)v);
      return prevv;
    } else if (size == 4) {
      uint32_t prevv = __sync_fetch_and_sub(
          (uint32_t volatile*)a, (uint32_t)v);
      *prev = prevv;
      *newv = prevv - (uint32_t)v;
      return prevv;
    } else if (size == 8) {
      uint64_t prevv = __sync_fetch_and_sub(
          (uint64_t volatile*)a, (uint64_t)v);
      *prev = prevv;
      *newv = prevv - v;
      return prevv;
    }

  } else if (op == tsan_atomic_op_fetch_and) {
    if (size == 1) {
      uint8_t prevv = __sync_fetch_and_and(
          (uint8_t volatile*)a, (uint8_t)v);
      *prev = prevv;
      *newv = prevv & (uint8_t)v;
      return prevv;
    } else if (size == 2) {
      uint16_t prevv = __sync_fetch_and_and(
          (uint16_t volatile*)a, (uint16_t)v);
      *prev = prevv;
      *newv = prevv & (uint16_t)v;
      return prevv;
    } else if (size == 4) {
      uint32_t prevv = __sync_fetch_and_and(
          (uint32_t volatile*)a, (uint32_t)v);
      *prev = prevv;
      *newv = prevv & (uint32_t)v;
      return prevv;
    } else if (size == 8) {
      uint64_t prevv = __sync_fetch_and_and(
          (uint64_t volatile*)a, (uint64_t)v);
      *prev = prevv;
      *newv = prevv & v;
      return prevv;
    }

  } else if (op == tsan_atomic_op_fetch_xor) {
    if (size == 1) {
      uint8_t prevv = __sync_fetch_and_xor(
          (uint8_t volatile*)a, (uint8_t)v);
      *prev = prevv;
      *newv = prevv ^ (uint8_t)v;
      return prevv;
    } else if (size == 2) {
      uint16_t prevv = __sync_fetch_and_xor(
          (uint16_t volatile*)a, (uint16_t)v);
      *prev = prevv;
      *newv = prevv ^ (uint16_t)v;
      return prevv;
    } else if (size == 4) {
      uint32_t prevv = __sync_fetch_and_xor(
          (uint32_t volatile*)a, (uint32_t)v);
      *prev = prevv;
      *newv = prevv ^ (uint32_t)v;
      return prevv;
    } else if (size == 8) {
      uint64_t prevv = __sync_fetch_and_xor(
          (uint64_t volatile*)a, (uint64_t)v);
      *prev = prevv;
      *newv = prevv ^ v;
      return prevv;
    }

  } else if (op == tsan_atomic_op_fetch_or) {
    if (size == 1) {
      uint8_t prevv = __sync_fetch_and_or(
          (uint8_t volatile*)a, (uint8_t)v);
      *prev = prevv;
      *newv = prevv | (uint8_t)v;
      return prevv;
    } else if (size == 2) {
      uint16_t prevv = __sync_fetch_and_or(
          (uint16_t volatile*)a, (uint16_t)v);
      *prev = prevv;
      *newv = prevv | (uint16_t)v;
      return prevv;
    } else if (size == 4) {
      uint32_t prevv = __sync_fetch_and_or(
          (uint32_t volatile*)a, (uint32_t)v);
      *prev = prevv;
      *newv = prevv | (uint32_t)v;
      return prevv;
    } else if (size == 8) {
      uint64_t prevv = __sync_fetch_and_or(
          (uint64_t volatile*)a, (uint64_t)v);
      *prev = prevv;
      *newv = prevv | v;
      return prevv;
    }

  } else if (op == tsan_atomic_op_compare_exchange_strong
          || op == tsan_atomic_op_compare_exchange_weak) {
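    // CMPXCHG cannot fail spuriously on x86, so weak and strong
    // compare-exchange are implemented identically; the operation succeeded
    // iff the returned (observed) value equals `cmp`.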
    uint64_t prevv = 0;
    if (size == 1) {
      prevv = __sync_val_compare_and_swap((uint8_t volatile*)a, cmp, v);
    } else if (size == 2) {
      prevv = __sync_val_compare_and_swap((uint16_t volatile*)a, cmp, v);
    } else if (size == 4) {
      prevv = __sync_val_compare_and_swap((uint32_t volatile*)a, cmp, v);
    } else if (size == 8) {
      prevv = __sync_val_compare_and_swap((uint64_t volatile*)a, cmp, v);
    }
    *prev = prevv;
    if (prevv != cmp)
      *newv = prevv;  // the swap failed: memory still holds the old value
    return prevv;

  } else if (op == tsan_atomic_op_fence) {
    if (mo == tsan_memory_order_seq_cst) {
      __sync_synchronize();
    } else {
      // Weaker fences are no-ops at the CPU level on x86 (TSO), but must
      // still keep the compiler from reordering memory accesses.
      __asm__ __volatile__ ("" ::: "memory");
    }
    return 0;
  }

  CHECK("unknown atomic operation" == 0);
  return 0;
}

#else

uint64_t tsan_atomic_do_op(tsan_atomic_op op,
                           tsan_memory_order mo,
                           tsan_memory_order fail_mo,
                           size_t size,
                           void volatile* a,
                           uint64_t v,
                           uint64_t cmp,
                           uint64_t* newv,
                           uint64_t* prev) {
  // Fail loudly rather than silently mis-model atomics on an unsupported
  // platform.
  CHECK("tsan_atomic_do_op is not implemented on this platform" == 0);
  return 0;
}

#endif