// RUN: %clang_cc1 %s -emit-llvm -o - -triple=i686-apple-darwin9 | FileCheck %s

// Also test serialization of atomic operations here, to avoid duplicating the
// test.
// RUN: %clang_cc1 %s -emit-pch -o %t -triple=i686-apple-darwin9
// RUN: %clang_cc1 %s -include-pch %t -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s
#ifndef ALREADY_INCLUDED
#define ALREADY_INCLUDED

// Basic IRGen tests for __c11_atomic_* and GNU __atomic_*

typedef enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
} memory_order;
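
// The enumerators take the values 0 through 5, matching the C11 memory_order
// constants; these integer values are what the ordering switches checked in
// generalFailureOrder below key on.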

int fi1(_Atomic(int) *i) {
  // CHECK-LABEL: @fi1
  // CHECK: load atomic i32* {{.*}} seq_cst
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int fi1a(int *i) {
  // CHECK-LABEL: @fi1a
  // CHECK: load atomic i32* {{.*}} seq_cst
  int v;
  __atomic_load(i, &v, memory_order_seq_cst);
  return v;
}

int fi1b(int *i) {
  // CHECK-LABEL: @fi1b
  // CHECK: load atomic i32* {{.*}} seq_cst
  return __atomic_load_n(i, memory_order_seq_cst);
}

void fi2(_Atomic(int) *i) {
  // CHECK-LABEL: @fi2
  // CHECK: store atomic i32 {{.*}} seq_cst
  __c11_atomic_store(i, 1, memory_order_seq_cst);
}

void fi2a(int *i) {
  // CHECK-LABEL: @fi2a
  // CHECK: store atomic i32 {{.*}} seq_cst
  int v = 1;
  __atomic_store(i, &v, memory_order_seq_cst);
}

void fi2b(int *i) {
  // CHECK-LABEL: @fi2b
  // CHECK: store atomic i32 {{.*}} seq_cst
  __atomic_store_n(i, 1, memory_order_seq_cst);
}

int fi3(_Atomic(int) *i) {
  // CHECK-LABEL: @fi3
  // CHECK: atomicrmw and
  // CHECK-NOT: and
  return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
}

int fi3a(int *i) {
  // CHECK-LABEL: @fi3a
  // CHECK: atomicrmw xor
  // CHECK-NOT: xor
  return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
}

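// The __atomic_fetch_OP builtins return the old value (just the atomicrmw
// result), whereas the __atomic_OP_fetch forms return the updated value, so
// IRGen re-applies the operation to the atomicrmw result: fi3b expects an
// extra 'add', and fi3d expects 'and' followed by 'xor' because nand_fetch
// computes ~(old & val).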
int fi3b(int *i) {
  // CHECK-LABEL: @fi3b
  // CHECK: atomicrmw add
  // CHECK: add
  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}

int fi3c(int *i) {
  // CHECK-LABEL: @fi3c
  // CHECK: atomicrmw nand
  // CHECK-NOT: and
  return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
}

int fi3d(int *i) {
  // CHECK-LABEL: @fi3d
  // CHECK: atomicrmw nand
  // CHECK: and
  // CHECK: xor
  return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
}

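// The compare-exchange builtins lower to a cmpxchg that yields an
// { old value, success flag } pair; on failure the old value is stored back
// into the 'expected' object, which is the extractvalue/br/store pattern
// matched below.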
_Bool fi4(_Atomic(int) *i) {
  // CHECK-LABEL: @fi4
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4a(int *i) {
  // CHECK-LABEL: @fi4a
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  int desired = 1;
  return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}

_Bool fi4b(int *i) {
  // CHECK-LABEL: @fi4b
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}

float ff1(_Atomic(float) *d) {
  // CHECK-LABEL: @ff1
  // CHECK: load atomic i32* {{.*}} monotonic
  return __c11_atomic_load(d, memory_order_relaxed);
}

void ff2(_Atomic(float) *d) {
  // CHECK-LABEL: @ff2
  // CHECK: store atomic i32 {{.*}} release
  __c11_atomic_store(d, 1, memory_order_release);
}

float ff3(_Atomic(float) *d) {
  return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
}

int* fp1(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp1
  // CHECK: load atomic i32* {{.*}} seq_cst
  return __c11_atomic_load(p, memory_order_seq_cst);
}

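// For pointers, the C11 builtin scales the addend by sizeof(*p), so adding 1
// to an _Atomic(int*) stores the constant 4 before the atomicrmw.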
int* fp2(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp2
  // CHECK: store i32 4
  // CHECK: atomicrmw add {{.*}} monotonic
  return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}

int *fp2a(int **p) {
  // CHECK-LABEL: @fp2a
  // CHECK: store i32 4
  // CHECK: atomicrmw sub {{.*}} monotonic
  // Note that the GNU builtins do not scale by sizeof(T): the operand is a
  // byte count, so subtracting one int* element means passing 4 here.
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

_Complex float fc(_Atomic(_Complex float) *c) {
  // CHECK-LABEL: @fc
  // CHECK: atomicrmw xchg i64*
  return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}

typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
  // CHECK-LABEL: @fs
  // CHECK: atomicrmw xchg i32*
  return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}

X fsa(X *c, X *d) {
  // CHECK-LABEL: @fsa
  // CHECK: atomicrmw xchg i32*
  X ret;
  __atomic_exchange(c, d, &ret, memory_order_seq_cst);
  return ret;
}

_Bool fsb(_Bool *c) {
  // CHECK-LABEL: @fsb
  // CHECK: atomicrmw xchg i8*
  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}

char flag1;
volatile char flag2;
void test_and_set() {
  // CHECK-LABEL: @test_and_set
  // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst
  __atomic_test_and_set(&flag1, memory_order_seq_cst);
  // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire
  __atomic_test_and_set(&flag2, memory_order_acquire);
  // CHECK: store atomic volatile i8 0, i8* @flag2 release
  __atomic_clear(&flag2, memory_order_release);
  // CHECK: store atomic i8 0, i8* @flag1 seq_cst
  __atomic_clear(&flag1, memory_order_seq_cst);
}

struct Sixteen {
  char c[16];
} sixteen;
struct Seventeen {
  char c[17];
} seventeen;

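// __atomic_always_lock_free is always folded at compile time (hence the
// 'CHECK-NOT: call'), and __c11_atomic_is_lock_free / __atomic_is_lock_free
// fold to a constant when the target can answer statically; everything else
// becomes a call to the __atomic_is_lock_free runtime function.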
int lock_free(struct Incomplete *incomplete) {
  // CHECK-LABEL: @lock_free

  // CHECK: call i32 @__atomic_is_lock_free(i32 3, i8* null)
  __c11_atomic_is_lock_free(3);

  // CHECK: call i32 @__atomic_is_lock_free(i32 16, i8* {{.*}}@sixteen{{.*}})
  __atomic_is_lock_free(16, &sixteen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 17, i8* {{.*}}@seventeen{{.*}})
  __atomic_is_lock_free(17, &seventeen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, incomplete);

  char cs[20];
  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, cs+1);

  // CHECK-NOT: call
  __atomic_always_lock_free(3, 0);
  __atomic_always_lock_free(16, 0);
  __atomic_always_lock_free(17, 0);
  __atomic_always_lock_free(16, &sixteen);
  __atomic_always_lock_free(17, &seventeen);

  int n;
  __atomic_is_lock_free(4, &n);

  // CHECK: ret i32 1
  return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}

// Tests for atomic operations on big values.  These should call the functions
// defined here:
// http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface

struct foo {
  int big[128];
};
struct bar {
  char c[3];
};

struct bar smallThing, thing1, thing2;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;

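// Neither the 3-byte struct bar nor the 512-byte struct foo matches an atomic
// width the target can handle inline, so operations on them are lowered to the
// generic __atomic_* library calls, with the size in bytes as the first
// argument.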
void structAtomicStore() {
  // CHECK-LABEL: @structAtomicStore
  struct foo f = {0};
  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  // CHECK: call void @__atomic_store(i32 3, i8* {{.*}} @smallThing

  __atomic_store(&bigThing, &f, 5);
  // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing
}
void structAtomicLoad() {
  // CHECK-LABEL: @structAtomicLoad
  struct bar b;
  __atomic_load(&smallThing, &b, 5);
  // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing

  struct foo f = {0};
  __atomic_load(&bigThing, &f, 5);
  // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing
}
struct foo structAtomicExchange() {
  // CHECK-LABEL: @structAtomicExchange
  struct foo f = {0};
  struct foo old;
  __atomic_exchange(&f, &bigThing, &old, 5);
  // CHECK: call void @__atomic_exchange(i32 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*),

  return __c11_atomic_exchange(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}
int structAtomicCmpExchange() {
  // CHECK-LABEL: @structAtomicCmpExchange
  _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
  // CHECK: call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2

  struct foo f = {0};
  struct foo g = {0};
  g.big[12] = 12;
  return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
  // CHECK: call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}

// Check that no atomic operations are used in any initialisation of _Atomic
// types.
_Atomic(int) atomic_init_i = 42;

// CHECK-LABEL: @atomic_init_foo
void atomic_init_foo()
{
  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  _Atomic(int) j = 12;

  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  __c11_atomic_init(&j, 42);

  // CHECK-NOT: atomic
  // CHECK: }
}

// CHECK-LABEL: @failureOrder
void failureOrder(_Atomic(int) *ptr, int *ptr2) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 43, memory_order_acquire, memory_order_relaxed);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic

  __c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire

  // Unknown ordering: conservatively pick the strongest valid option (for now!).
  __atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire

  // Undefined behaviour: we don't really care what the failure ordering turns
  // out to be, so leave it out of the CHECK line:
  __atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst
}

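// When the success and failure orderings are not compile-time constants, IRGen
// switches over the success ordering and then over the failure ordering,
// emitting one cmpxchg per valid combination; orderings that are not valid for
// failure (release, acq_rel) fall through to the monotonic default.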
// CHECK-LABEL: @generalFailureOrder
void generalFailureOrder(_Atomic(int) *ptr, int *ptr2, int success, int fail) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 42, success, fail);
  // CHECK: switch i32 {{.*}}, label %[[MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE]]
  // CHECK-NEXT: i32 3, label %[[RELEASE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 4, label %[[ACQREL:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST:[0-9a-zA-Z._]+]]

  // CHECK: [[MONOTONIC]]
  // CHECK: switch {{.*}}, label %[[MONOTONIC_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQUIRE]]
  // CHECK: switch {{.*}}, label %[[ACQUIRE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[RELEASE]]
  // CHECK: switch {{.*}}, label %[[RELEASE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQREL]]
  // CHECK: switch {{.*}}, label %[[ACQREL_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[SEQCST]]
  // CHECK: switch {{.*}}, label %[[SEQCST_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[MONOTONIC_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} monotonic monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acquire monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acquire acquire
  // CHECK: br

  // CHECK: [[ACQREL_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acq_rel monotonic
  // CHECK: br

  // CHECK: [[ACQREL_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acq_rel acquire
  // CHECK: br

  // CHECK: [[SEQCST_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} seq_cst monotonic
  // CHECK: br

  // CHECK: [[SEQCST_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} seq_cst acquire
  // CHECK: br

  // CHECK: [[SEQCST_SEQCST]]
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br
}

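// Likewise, a 'weak' flag that is not a compile-time constant becomes a switch
// on an i1, with one arm emitting the strong cmpxchg and the other the
// 'cmpxchg weak' form.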
void generalWeakness(int *ptr, int *ptr2, _Bool weak) {
  __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_seq_cst, memory_order_seq_cst);
  // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]

  // CHECK: [[STRONG]]
  // CHECK-NOT: br
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br

  // CHECK: [[WEAK]]
  // CHECK-NOT: br
  // CHECK: cmpxchg weak {{.*}} seq_cst seq_cst
  // CHECK: br
}

// Having checked the flow in the previous two cases, we'll trust clang to
// combine them sanely.
void EMIT_ALL_THE_THINGS(int *ptr, int *ptr2, int new, _Bool weak, int success, int fail) {
  __atomic_compare_exchange(ptr, ptr2, &new, weak, success, fail);

  // CHECK: = cmpxchg {{.*}} monotonic monotonic
  // CHECK: = cmpxchg weak {{.*}} monotonic monotonic
  // CHECK: = cmpxchg {{.*}} acquire monotonic
  // CHECK: = cmpxchg {{.*}} acquire acquire
  // CHECK: = cmpxchg weak {{.*}} acquire monotonic
  // CHECK: = cmpxchg weak {{.*}} acquire acquire
  // CHECK: = cmpxchg {{.*}} release monotonic
  // CHECK: = cmpxchg weak {{.*}} release monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel acquire
  // CHECK: = cmpxchg weak {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg weak {{.*}} acq_rel acquire
  // CHECK: = cmpxchg {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg {{.*}} seq_cst acquire
  // CHECK: = cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: = cmpxchg weak {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg weak {{.*}} seq_cst acquire
  // CHECK: = cmpxchg weak {{.*}} seq_cst seq_cst
}

#endif