1// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s
2// RUN: %clang_cc1 -fopenmp=libiomp5 -x c++ -std=c++11 -triple %itanium_abi_triple -emit-pch -o %t %s
3// RUN: %clang_cc1 -fopenmp=libiomp5 -x c++ -triple %itanium_abi_triple -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
4// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -std=c++11 -DLAMBDA -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
5// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -fblocks -DBLOCKS -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
6// expected-no-diagnostics
7#ifndef HEADER
8#define HEADER
9
// 'g' is a process-wide global made threadprivate: every OpenMP thread gets
// its own copy. The 'copyin(g)' clauses below must broadcast the master
// thread's value of 'g' into each worker's copy on parallel-region entry,
// which is what the CHECK/LAMBDA/BLOCKS patterns in this file verify.
volatile int g = 1212;
#pragma omp threadprivate(g)
12
// Class-typed payload used to verify that 'copyin' of a class (and of an
// array of classes) is performed via the copy-assignment operator rather
// than a bitwise copy. NOTE: the exact member-function set and bodies are
// load-bearing — the CHECK lines match the calls this type's codegen emits.
template <class T>
struct S {
  T f;
  // Both constructors read the threadprivate 'g', so construction observes
  // the constructing thread's copy.
  S(T a) : f(a + g) {}
  S() : f(g) {}
  // Deliberately a no-op body: the test only checks that this operator is
  // *called* during 'copyin', not what it does.
  S &operator=(const S &) { return *this; };
  operator T() { return T(); }
  ~S() {}
};
22
23// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
24// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
25// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
26
27
28// CHECK-DAG: [[T_VAR:@.+]] = internal global i{{[0-9]+}} 1122,
29// CHECK-DAG: [[VEC:@.+]] = internal global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 1, i{{[0-9]+}} 2],
30// CHECK-DAG: [[S_ARR:@.+]] = internal global [2 x [[S_FLOAT_TY]]] zeroinitializer,
31// CHECK-DAG: [[VAR:@.+]] = internal global [[S_FLOAT_TY]] zeroinitializer,
32// CHECK-DAG: [[TMAIN_T_VAR:@.+]] = linkonce_odr global i{{[0-9]+}} 333,
33// CHECK-DAG: [[TMAIN_VEC:@.+]] = linkonce_odr global [2 x i{{[0-9]+}}] [i{{[0-9]+}} 3, i{{[0-9]+}} 3],
34// CHECK-DAG: [[TMAIN_S_ARR:@.+]] = linkonce_odr global [2 x [[S_INT_TY]]] zeroinitializer,
35// CHECK-DAG: [[TMAIN_VAR:@.+]] = linkonce_odr global [[S_INT_TY]] zeroinitializer,
// Template twin of the non-LAMBDA/BLOCKS branch of main(): exercises
// 'copyin' of a scalar, a POD array, an array of S<T>, and a single S<T>,
// all function-local statics made threadprivate. Instantiated as
// tmain<int>() and checked via the TMAIN_* patterns below.
template <typename T>
T tmain() {
  S<T> test;
  test = S<T>();  // forces instantiation of S<T>'s copy-assignment operator
  static T t_var = 333;
  static T vec[] = {3, 3};
  static S<T> s_arr[] = {1, 2};
  static S<T> var(3);
#pragma omp threadprivate(t_var, vec, s_arr, var)
#pragma omp parallel copyin(t_var, vec, s_arr, var)
  {
    // Body only needs to *use* the variables so they are referenced in the
    // outlined function; the interesting codegen is the copyin prologue.
    vec[0] = t_var;
    s_arr[0] = var;
  }
#pragma omp parallel copyin(t_var)
  {}
  return T();
}
54
// Three build variants of the same scenario (plain C++, -DLAMBDA, -DBLOCKS).
// In each, a 'parallel copyin' region must: locate the thread's cached copy
// via __kmpc_threadprivate_cached, skip the copy on the master thread
// (pointer-identity compare against the original global), assign/memcpy the
// master's value into the worker's copy, then hit an implicit barrier
// before the region body runs.
int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: call{{( x86_thiscallcc)?}} void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8*
#pragma omp parallel copyin(g)
  {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])

    // threadprivate_g = g;
    // LAMBDA: call i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
    // LAMBDA: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
    // LAMBDA: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[G]] to i{{[0-9]+}}), %{{.+}}
    // LAMBDA: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
    // LAMBDA: [[NOT_MASTER]]
    // LAMBDA: load i{{[0-9]+}}, i{{[0-9]+}}* [[G]],
    // LAMBDA: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
    // LAMBDA: [[DONE]]

    // Copyin must complete on all threads before any of them runs the body.
    // LAMBDA: call i32 @__kmpc_cancel_barrier(
    g = 1;
    // LAMBDA: call{{( x86_thiscallcc)?}} void [[INNER_LAMBDA:@.+]](%{{.+}}*
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8*
#pragma omp parallel copyin(g)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])

    // threadprivate_g = g;
    // BLOCKS: call i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
    // BLOCKS: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
    // BLOCKS: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[G]] to i{{[0-9]+}}), %{{.+}}
    // BLOCKS: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
    // BLOCKS: [[NOT_MASTER]]
    // BLOCKS: load i{{[0-9]+}}, i{{[0-9]+}}* [[G]],
    // BLOCKS: store volatile i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
    // BLOCKS: [[DONE]]

    // BLOCKS: call i32 @__kmpc_cancel_barrier(
    g = 1;
    // BLOCKS: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}*
    // NOTE(review): the regex '{{[[^:word:]]}}' used on the negative checks
    // below is malformed (the bracket expression effectively never matches,
    // so those negative checks are vacuous). The intent was presumably
    // '{{[^a-zA-Z0-9_]}}' ("[[G]] not part of a longer identifier"); confirm
    // the generated IR still passes before tightening.
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: call i8* @__kmpc_threadprivate_cached({{.+}} [[G]]
      // BLOCKS: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  // Plain variant: same shape as tmain<T>() above, with concrete types.
  S<float> test;
  test = S<float>();
  static int t_var = 1122;
  static int vec[] = {1, 2};
  static S<float> s_arr[] = {1, 2};
  static S<float> var(3);
#pragma omp threadprivate(t_var, vec, s_arr, var)
#pragma omp parallel copyin(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
    s_arr[0] = var;
  }
#pragma omp parallel copyin(t_var)
  {}
  return tmain<int>();
#endif
}
145
146// CHECK-LABEL: @main
147// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
148// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* [[TEST]], [[S_FLOAT_TY]]*
149// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[MAIN_MICROTASK:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
150// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[MAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
151// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
152// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
153// CHECK: ret
154//
155// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
156// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
157// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
158// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
159
160// threadprivate_t_var = t_var;
161// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[T_VAR]]
162// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
163// CHECK: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[T_VAR]] to i{{[0-9]+}}), %{{.+}}
164// CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
165// CHECK: [[NOT_MASTER]]
166// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR]],
167// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
168
169// threadprivate_vec = vec;
170// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[VEC]]
171// CHECK: call void @llvm.memcpy{{.*}}(i8* %{{.+}}, i8* bitcast ([2 x i{{[0-9]+}}]* [[VEC]] to i8*),
172
173// threadprivate_s_arr = s_arr;
174// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[S_ARR]]
175// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* {{%.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
176// CHECK: [[S_ARR_PRIV_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_PRIV_BEGIN]], i{{[0-9]+}} 2
177// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_PRIV_BEGIN]], [[S_ARR_PRIV_END]]
178// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
179// CHECK: [[S_ARR_BODY]]
180// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
181// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
182
183// threadprivate_var = var;
184// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[VAR]]
185// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN]]([[S_FLOAT_TY]]* {{%.+}}, [[S_FLOAT_TY]]* {{.*}}[[VAR]])
186// CHECK: [[DONE]]
187
188// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
189// CHECK: ret void
190
191// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
192// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
193// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
194// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
195
196// threadprivate_t_var = t_var;
197// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[T_VAR]]
198// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
199// CHECK: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[T_VAR]] to i{{[0-9]+}}), %{{.+}}
200// CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
201// CHECK: [[NOT_MASTER]]
202// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR]],
203// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
204// CHECK: [[DONE]]
205
206// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
207// CHECK: ret void
208
209// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
210// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
211// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[TEST]], [[S_INT_TY]]*
212// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
213// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, {{%.+}}*)* [[TMAIN_MICROTASK1:@.+]] to void (i32*, i32*, ...)*), i8* %{{.+}})
214// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
215// CHECK: ret
216//
217// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
218// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
219// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
220// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
221
222// threadprivate_t_var = t_var;
223// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_T_VAR]]
224// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
225// CHECK: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[TMAIN_T_VAR]] to i{{[0-9]+}}), %{{.+}}
226// CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
227// CHECK: [[NOT_MASTER]]
228// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[TMAIN_T_VAR]],
229// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
230
231// threadprivate_vec = vec;
232// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_VEC]]
233// CHECK: call void @llvm.memcpy{{.*}}(i8* %{{.+}}, i8* bitcast ([2 x i{{[0-9]+}}]* [[TMAIN_VEC]] to i8*),
234
235// threadprivate_s_arr = s_arr;
236// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_S_ARR]]
237// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* {{%.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
238// CHECK: [[S_ARR_PRIV_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_PRIV_BEGIN]], i{{[0-9]+}} 2
239// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_PRIV_BEGIN]], [[S_ARR_PRIV_END]]
240// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
241// CHECK: [[S_ARR_BODY]]
242// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
243// CHECK: br i1 {{.+}}, label %{{.+}}, label %[[S_ARR_BODY]]
244
245// threadprivate_var = var;
246// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_VAR]]
247// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* {{%.+}}, [[S_INT_TY]]* {{.*}}[[TMAIN_VAR]])
248// CHECK: [[DONE]]
249
250// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
251// CHECK: ret void
252
253// CHECK: define internal void [[TMAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, {{%.+}}* %{{.+}})
254// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
255// CHECK: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID_ADDR_ADDR]],
256// CHECK: [[GTID:%.+]] = load i32, i32* [[GTID_ADDR]],
257
258// threadprivate_t_var = t_var;
259// CHECK: call i8* @__kmpc_threadprivate_cached({{.+}} [[TMAIN_T_VAR]]
260// CHECK: ptrtoint i{{[0-9]+}}* %{{.+}} to i{{[0-9]+}}
261// CHECK: icmp ne i{{[0-9]+}} ptrtoint (i{{[0-9]+}}* [[TMAIN_T_VAR]] to i{{[0-9]+}}), %{{.+}}
262// CHECK: br i1 %{{.+}}, label %[[NOT_MASTER:.+]], label %[[DONE:.+]]
263// CHECK: [[NOT_MASTER]]
264// CHECK: load i{{[0-9]+}}, i{{[0-9]+}}* [[TMAIN_T_VAR]],
265// CHECK: store i{{[0-9]+}} %{{.+}}, i{{[0-9]+}}* %{{.+}},
266// CHECK: [[DONE]]
267
268// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i32 [[GTID]])
269// CHECK: ret void
270
271#endif
272
273