// RUN: %clang_cc1 %s -emit-llvm -o - -ffreestanding -triple=i686-apple-darwin9 | FileCheck %s

// Also test serialization of atomic operations here, to avoid duplicating the
// test.
// RUN: %clang_cc1 %s -emit-pch -o %t -ffreestanding -triple=i686-apple-darwin9
// RUN: %clang_cc1 %s -include-pch %t -ffreestanding -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s
#ifndef ALREADY_INCLUDED
#define ALREADY_INCLUDED

#include <stdatomic.h>

// Basic IRGen tests for __c11_atomic_* and GNU __atomic_*

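// fi1 uses the Clang __c11 builtin on an _Atomic pointer, fi1a/fi1b use the
// GNU __atomic builtins on a plain int*, and fi1c uses the <stdatomic.h>
// generic function; all of them should lower to the same seq_cst atomic load.
// The fi2* functions below mirror this for stores.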
int fi1(_Atomic(int) *i) {
  // CHECK-LABEL: @fi1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int fi1a(int *i) {
  // CHECK-LABEL: @fi1a
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int v;
  __atomic_load(i, &v, memory_order_seq_cst);
  return v;
}

int fi1b(int *i) {
  // CHECK-LABEL: @fi1b
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __atomic_load_n(i, memory_order_seq_cst);
}

int fi1c(atomic_int *i) {
  // CHECK-LABEL: @fi1c
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return atomic_load(i);
}

void fi2(_Atomic(int) *i) {
  // CHECK-LABEL: @fi2
  // CHECK: store atomic i32 {{.*}} seq_cst
  __c11_atomic_store(i, 1, memory_order_seq_cst);
}

void fi2a(int *i) {
  // CHECK-LABEL: @fi2a
  // CHECK: store atomic i32 {{.*}} seq_cst
  int v = 1;
  __atomic_store(i, &v, memory_order_seq_cst);
}

void fi2b(int *i) {
  // CHECK-LABEL: @fi2b
  // CHECK: store atomic i32 {{.*}} seq_cst
  __atomic_store_n(i, 1, memory_order_seq_cst);
}

void fi2c(atomic_int *i) {
  // CHECK-LABEL: @fi2c
  // CHECK: store atomic i32 {{.*}} seq_cst
  atomic_store(i, 1);
}

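// The fetch_OP builtins return the old value, so only the atomicrmw is
// expected; the OP_fetch forms return the updated value, so an extra
// arithmetic instruction recomputing it follows the atomicrmw
// (nand_fetch recomputes it with an 'and' plus an 'xor').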
int fi3(_Atomic(int) *i) {
  // CHECK-LABEL: @fi3
  // CHECK: atomicrmw and
  // CHECK-NOT: and
  return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
}

int fi3a(int *i) {
  // CHECK-LABEL: @fi3a
  // CHECK: atomicrmw xor
  // CHECK-NOT: xor
  return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
}

int fi3b(int *i) {
  // CHECK-LABEL: @fi3b
  // CHECK: atomicrmw add
  // CHECK: add
  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}

int fi3c(int *i) {
  // CHECK-LABEL: @fi3c
  // CHECK: atomicrmw nand
  // CHECK-NOT: and
  return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
}

int fi3d(int *i) {
  // CHECK-LABEL: @fi3d
  // CHECK: atomicrmw nand
  // CHECK: and
  // CHECK: xor
  return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
}

int fi3e(atomic_int *i) {
  // CHECK-LABEL: @fi3e
  // CHECK: atomicrmw or
  // CHECK-NOT: {{ or }}
  return atomic_fetch_or(i, 1);
}

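// The compare-exchange builtins lower to an LLVM cmpxchg ('weak' for the weak
// variants); it yields an {old value, success} pair, and the old value is
// written back to the 'expected' object when the exchange fails.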
_Bool fi4(_Atomic(int) *i) {
  // CHECK-LABEL: @fi4(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4a(int *i) {
  // CHECK-LABEL: @fi4a
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  int desired = 1;
  return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}

_Bool fi4b(int *i) {
  // CHECK-LABEL: @fi4b(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4c(atomic_int *i) {
  // CHECK-LABEL: @fi4c
  // CHECK: cmpxchg i32*
  int cmp = 0;
  return atomic_compare_exchange_strong(i, &cmp, 1);
}

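// Atomic operations on float are performed on its 32-bit integer
// representation, hence the i32 accesses below. memory_order_relaxed maps to
// LLVM's "monotonic" ordering.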
float ff1(_Atomic(float) *d) {
  // CHECK-LABEL: @ff1
  // CHECK: load atomic i32, i32* {{.*}} monotonic
  return __c11_atomic_load(d, memory_order_relaxed);
}

void ff2(_Atomic(float) *d) {
  // CHECK-LABEL: @ff2
  // CHECK: store atomic i32 {{.*}} release
  __c11_atomic_store(d, 1, memory_order_release);
}

float ff3(_Atomic(float) *d) {
  return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
}

struct S {
  double x;
};

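// struct S is an 8-byte struct; the GNU __atomic builtins on it are lowered to
// the sized __atomic_load_8 / __atomic_store_8 / __atomic_exchange_8 /
// __atomic_compare_exchange_8 library calls checked below.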
struct S fd1(struct S *a) {
  // CHECK-LABEL: @fd1
  // CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
  // CHECK: [[RET:%.*]]    = alloca %struct.S, align 4
  // CHECK: [[CALL:%.*]]   = call i64 @__atomic_load_8(
  // CHECK: [[CAST:%.*]]   = bitcast %struct.S* [[RET]] to i64*
  // CHECK: store i64 [[CALL]], i64* [[CAST]], align 4
  struct S ret;
  __atomic_load(a, &ret, memory_order_seq_cst);
  return ret;
}

void fd2(struct S *a, struct S *b) {
  // CHECK-LABEL: @fd2
  // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
  // CHECK-NEXT: call void @__atomic_store_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
  // CHECK-NEXT: ret void
  __atomic_store(a, b, memory_order_seq_cst);
}

void fd3(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd3
  // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
  // CHECK-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: store i64 [[CALL]], i64* [[COERCED_C]], align 4

  __atomic_exchange(a, b, c, memory_order_seq_cst);
}

_Bool fd4(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd4
  // CHECK:      [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK:      store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i8*
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: [[LOAD_C:%.*]] = load i64, i64* [[COERCED_C]], align 4
  // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(i8* [[COERCED_A]], i8* [[COERCED_B]], i64 [[LOAD_C]]
  // CHECK-NEXT: ret i1 [[CALL]]
  return __atomic_compare_exchange(a, b, c, 1, 5, 5);
}

int* fp1(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(p, memory_order_seq_cst);
}

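// C11 atomic arithmetic on _Atomic(int*) scales the operand by sizeof(int),
// so adding 1 stores the constant 4 on this 32-bit target.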
int* fp2(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp2
  // CHECK: store i32 4
  // CHECK: atomicrmw add {{.*}} monotonic
  return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}

int *fp2a(int **p) {
  // CHECK-LABEL: @fp2a
  // CHECK: store i32 4
  // CHECK: atomicrmw sub {{.*}} monotonic
  // Note: the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

_Complex float fc(_Atomic(_Complex float) *c) {
  // CHECK-LABEL: @fc
  // CHECK: atomicrmw xchg i64*
  return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}

typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
  // CHECK-LABEL: @fs
  // CHECK: atomicrmw xchg i32*
  return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}

X fsa(X *c, X *d) {
  // CHECK-LABEL: @fsa
  // CHECK: atomicrmw xchg i32*
  X ret;
  __atomic_exchange(c, d, &ret, memory_order_seq_cst);
  return ret;
}

_Bool fsb(_Bool *c) {
  // CHECK-LABEL: @fsb
  // CHECK: atomicrmw xchg i8*
  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}

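// __atomic_test_and_set lowers to an i8 xchg of 1 and __atomic_clear to an
// atomic store of i8 0; the volatile qualifier on flag2 is kept on the
// generated instructions.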
char flag1;
volatile char flag2;
void test_and_set() {
  // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst
  __atomic_test_and_set(&flag1, memory_order_seq_cst);
  // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire
  __atomic_test_and_set(&flag2, memory_order_acquire);
  // CHECK: store atomic volatile i8 0, i8* @flag2 release
  __atomic_clear(&flag2, memory_order_release);
  // CHECK: store atomic i8 0, i8* @flag1 seq_cst
  __atomic_clear(&flag1, memory_order_seq_cst);
}

struct Sixteen {
  char c[16];
} sixteen;
struct Seventeen {
  char c[17];
} seventeen;

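// Queries whose answer is not known at compile time call
// __atomic_is_lock_free; everything after the cs+1 case (including every
// __atomic_always_lock_free) folds to a constant, so no further calls appear
// and the function simply returns 1.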
int lock_free(struct Incomplete *incomplete) {
  // CHECK-LABEL: @lock_free

  // CHECK: call i32 @__atomic_is_lock_free(i32 3, i8* null)
  __c11_atomic_is_lock_free(3);

  // CHECK: call i32 @__atomic_is_lock_free(i32 16, i8* {{.*}}@sixteen{{.*}})
  __atomic_is_lock_free(16, &sixteen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 17, i8* {{.*}}@seventeen{{.*}})
  __atomic_is_lock_free(17, &seventeen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, incomplete);

  char cs[20];
  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, cs+1);

  // CHECK-NOT: call
  __atomic_always_lock_free(3, 0);
  __atomic_always_lock_free(16, 0);
  __atomic_always_lock_free(17, 0);
  __atomic_always_lock_free(16, &sixteen);
  __atomic_always_lock_free(17, &seventeen);

  int n;
  __atomic_is_lock_free(4, &n);

  // CHECK: ret i32 1
  return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}

// Tests for atomic operations on big values.  These should call the functions
// defined here:
// http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface

struct foo {
  int big[128];
};
struct bar {
  char c[3];
};

struct bar smallThing, thing1, thing2;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;

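// The 3-byte struct bar and the 512-byte struct foo both go through the
// generic __atomic_* library routines, with the object size passed as the
// first argument.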
void structAtomicStore() {
  // CHECK-LABEL: @structAtomicStore
  struct foo f = {0};
  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  // CHECK: call void @__atomic_store(i32 3, i8* {{.*}} @smallThing

  __atomic_store(&bigThing, &f, 5);
  // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing
}
void structAtomicLoad() {
  // CHECK-LABEL: @structAtomicLoad
  struct bar b;
  __atomic_load(&smallThing, &b, 5);
  // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing

  struct foo f = {0};
  __atomic_load(&bigThing, &f, 5);
  // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing
}
struct foo structAtomicExchange() {
  // CHECK-LABEL: @structAtomicExchange
  struct foo f = {0};
  struct foo old;
  __atomic_exchange(&f, &bigThing, &old, 5);
  // CHECK: call void @__atomic_exchange(i32 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*),

  return __c11_atomic_exchange(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}
int structAtomicCmpExchange() {
  // CHECK-LABEL: @structAtomicCmpExchange
  // CHECK: %[[x_mem:.*]] = alloca i8
  _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
  // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
  // CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8
  // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1
  // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]]
  // CHECK: %[[x_bool:.*]] = trunc i8 %[[x]] to i1
  // CHECK: %[[conv1:.*]] = zext i1 %[[x_bool]] to i32

  struct foo f = {0};
  struct foo g = {0};
  g.big[12] = 12;
  return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
  // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
  // CHECK: %[[conv2:.*]] = zext i1 %[[call2]] to i32
  // CHECK: %[[and:.*]] = and i32 %[[conv1]], %[[conv2]]
  // CHECK: ret i32 %[[and]]
}

// Check that no atomic operations are used in any initialisation of _Atomic
// types.
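// Within atomic_init_foo, both the initialiser of j and __c11_atomic_init
// lower to plain (non-atomic) stores.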
_Atomic(int) atomic_init_i = 42;

// CHECK-LABEL: @atomic_init_foo
void atomic_init_foo()
{
  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  _Atomic(int) j = 12;

  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  __c11_atomic_init(&j, 42);

  // CHECK-NOT: atomic
  // CHECK: }
}

// CHECK-LABEL: @failureOrder
void failureOrder(_Atomic(int) *ptr, int *ptr2) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 43, memory_order_acquire, memory_order_relaxed);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic

  __c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire

  // Unknown ordering: conservatively pick strongest valid option (for now!).
  __atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire

  // Undefined behaviour: we don't care what the failure ordering turns out to
  // be, so it is left out of the check:
  __atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst
}

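// When the orderings are only known at run time, IRGen emits a switch over the
// success ordering with a nested switch over the failure ordering, and one
// cmpxchg per valid combination.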
// CHECK-LABEL: @generalFailureOrder
void generalFailureOrder(_Atomic(int) *ptr, int *ptr2, int success, int fail) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 42, success, fail);
  // CHECK: switch i32 {{.*}}, label %[[MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE]]
  // CHECK-NEXT: i32 3, label %[[RELEASE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 4, label %[[ACQREL:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST:[0-9a-zA-Z._]+]]

  // CHECK: [[MONOTONIC]]
  // CHECK: switch {{.*}}, label %[[MONOTONIC_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQUIRE]]
  // CHECK: switch {{.*}}, label %[[ACQUIRE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[RELEASE]]
  // CHECK: switch {{.*}}, label %[[RELEASE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQREL]]
  // CHECK: switch {{.*}}, label %[[ACQREL_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[SEQCST]]
  // CHECK: switch {{.*}}, label %[[SEQCST_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[MONOTONIC_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} monotonic monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acquire monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acquire acquire
  // CHECK: br

  // CHECK: [[ACQREL_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acq_rel monotonic
  // CHECK: br

  // CHECK: [[ACQREL_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acq_rel acquire
  // CHECK: br

  // CHECK: [[SEQCST_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} seq_cst monotonic
  // CHECK: br

  // CHECK: [[SEQCST_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} seq_cst acquire
  // CHECK: br

  // CHECK: [[SEQCST_SEQCST]]
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br
}

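// A runtime 'weak' flag is handled the same way: a branch selects between a
// strong and a weak cmpxchg.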
void generalWeakness(int *ptr, int *ptr2, _Bool weak) {
  __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_seq_cst, memory_order_seq_cst);
  // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]

  // CHECK: [[STRONG]]
  // CHECK-NOT: br
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br

  // CHECK: [[WEAK]]
  // CHECK-NOT: br
  // CHECK: cmpxchg weak {{.*}} seq_cst seq_cst
  // CHECK: br
}

// Having checked the flow in the previous two cases, we'll trust clang to
// combine them sanely.
void EMIT_ALL_THE_THINGS(int *ptr, int *ptr2, int new, _Bool weak, int success, int fail) {
  __atomic_compare_exchange(ptr, ptr2, &new, weak, success, fail);

  // CHECK: = cmpxchg {{.*}} monotonic monotonic
  // CHECK: = cmpxchg weak {{.*}} monotonic monotonic
  // CHECK: = cmpxchg {{.*}} acquire monotonic
  // CHECK: = cmpxchg {{.*}} acquire acquire
  // CHECK: = cmpxchg weak {{.*}} acquire monotonic
  // CHECK: = cmpxchg weak {{.*}} acquire acquire
  // CHECK: = cmpxchg {{.*}} release monotonic
  // CHECK: = cmpxchg weak {{.*}} release monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel acquire
  // CHECK: = cmpxchg weak {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg weak {{.*}} acq_rel acquire
  // CHECK: = cmpxchg {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg {{.*}} seq_cst acquire
  // CHECK: = cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: = cmpxchg weak {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg weak {{.*}} seq_cst acquire
  // CHECK: = cmpxchg weak {{.*}} seq_cst seq_cst
}

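// The or-fetch below operates through an address_space(257) pointer; 0x308 is
// 776, which is why the atomicrmw operand is an inttoptr of 776.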
int PR21643() {
  return __atomic_or_fetch((int __attribute__((address_space(257))) *)0x308, 1,
                           __ATOMIC_RELAXED);
  // CHECK: %[[atomictmp:.*]] = alloca i32, align 4
  // CHECK: %[[atomicdst:.*]] = alloca i32, align 4
  // CHECK: store i32 1, i32* %[[atomictmp]]
  // CHECK: %[[one:.*]] = load i32, i32* %[[atomictmp]], align 4
  // CHECK: %[[old:.*]] = atomicrmw or i32 addrspace(257)* inttoptr (i32 776 to i32 addrspace(257)*), i32 %[[one]] monotonic
  // CHECK: %[[new:.*]] = or i32 %[[old]], %[[one]]
  // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4
  // CHECK: %[[ret:.*]] = load i32, i32* %[[atomicdst]], align 4
  // CHECK: ret i32 %[[ret]]
}

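// Atomic operations on volatile objects keep the volatile qualifier on the
// generated instructions ("load atomic volatile", "atomicrmw volatile add").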
int PR17306_1(volatile _Atomic(int) *i) {
  // CHECK-LABEL: @PR17306_1
  // CHECK:      %[[i_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: %[[addr:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, i32* %[[addr]] seq_cst
  // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int PR17306_2(volatile int *i, int value) {
  // CHECK-LABEL: @PR17306_2
  // CHECK:      %[[i_addr:.*]] = alloca i32*
  // CHECK-NEXT: %[[value_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomictmp:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: store i32 %value, i32* %[[value_addr]]
  // CHECK-NEXT: %[[i_lval:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[value:.*]] = load i32, i32* %[[value_addr]]
  // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]]
  // CHECK-NEXT: %[[value_lval:.*]] = load i32, i32* %[[atomictmp]]
  // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst
  // CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]]
  // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __atomic_add_fetch(i, value, memory_order_seq_cst);
}

#endif