// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm -fexceptions -fcxx-exceptions -o - < %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t < %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify -emit-llvm -o - < %s | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm -o - < %s | FileCheck %s --check-prefix=TERM_DEBUG
// REQUIRES: x86-registered-target
// expected-no-diagnostics
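//
// Note: the first three RUN lines verify the generated IR (directly and through
// a PCH round trip) with the CHECK prefix; the last RUN line compiles with
// line-tables-only debug info on darwin and uses the TERM_DEBUG prefix to check
// terminate handling and debug locations.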
#ifndef HEADER
#define HEADER

long long get_val() { return 0; }
double *g_ptr;

// CHECK-LABEL: define {{.*void}} @{{.*}}simple{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void simple(float *a, float *b, float *c, float *d) {
  #pragma omp for simd
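// The loop below runs for i = 3, 8, ..., 28, i.e. 6 iterations, so the
// normalized IV goes from 0 to 5 and the runtime upper bound is clamped to 5.
// Schedule kind 34 corresponds to the static schedule.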
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 5
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 5, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]]
  for (int i = 3; i < 32; i += 5) {
// CHECK: [[SIMPLE_LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float*
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// br label %{{.+}}, !llvm.loop !{{.+}}
  }
// CHECK: [[SIMPLE_LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  long long k = get_val();

  #pragma omp for simd linear(k : 3) schedule(simd, nonmonotonic: dynamic)
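// k is linear with step 3: its privatized start value is captured in LIN0
// below. The schedule(simd, nonmonotonic: dynamic) clause is passed to
// __kmpc_dispatch_init_4 as 1073741859 = 0x40000023, i.e. the dynamic
// (chunked) schedule kind 35 with the nonmonotonic modifier bit 0x40000000 set.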
// CHECK: [[K0:%.+]] = call {{.*}}i64 @{{.*}}get_val
// CHECK-NEXT: store i64 [[K0]], i64* [[K_VAR:%[^,]+]]
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: call void @__kmpc_dispatch_init_4(%ident_t* {{.+}}, i32 %{{.+}}, i32 1073741859, i32 0, i32 8, i32 1, i32 1)
// CHECK: [[NEXT:%.+]] = call i32 @__kmpc_dispatch_next_4(%ident_t* {{.+}}, i32 %{{.+}}, i32* %{{.+}}, i32* [[LB:%.+]], i32* [[UB:%.+]], i32* %{{.+}})
// CHECK: [[COND:%.+]] = icmp ne i32 [[NEXT]], 0
// CHECK: br i1 [[COND]], label %[[CONT:.+]], label %[[END:.+]]
// CHECK: [[CONT]]
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV2:%[^,]+]],

// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp sle i32 [[IV2]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP2_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// FIXME: Why is the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update the privatized version of the linear variable.
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]]
  }
// CHECK: [[SIMPLE_LOOP2_END]]
//
// Update linear vars after the loop, as the loop was operating on a private version.
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_VAR]]
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
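// The loop above has 9 iterations (i = 10 down to 2), so the final value of k
// is its start value plus 9 * 3 = 27, which is exactly the add of 27 checked
// above before the result is stored back to the original k.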

  int lin = 12;
  #pragma omp for simd linear(lin : get_val()), linear(g_ptr)
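// The linear step for 'lin' is not a compile-time constant, so get_val() is
// invoked once before the loop and remembered in LIN_STEP; g_ptr is linear
// with the default step of 1.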

// Init linear private var.
// CHECK: store i32 12, i32* [[LIN_VAR:%[^,]+]]
// CHECK: [[LIN_LOAD:%.+]] = load i32, i32* [[LIN_VAR]]
// CHECK-NEXT: store i32 [[LIN_LOAD]], i32* [[LIN_START:%[^,]+]]
// Remember linear step.
// CHECK: [[CALL_VAL:%.+]] = invoke
// CHECK: store i64 [[CALL_VAL]], i64* [[LIN_STEP:%[^,]+]]

// CHECK: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR:@[^,]+]]
// CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]]

// CHECK: call void @__kmpc_for_static_init_8u(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i64 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV3:%[^,]+]],
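// The loop below runs for it = 2000, 1600, 1200, 800, i.e. 4 iterations, so
// the IV is clamped to 3. The counter is unsigned long long, hence the
// _8u init routine and the unsigned (ugt/ule) comparisons.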

// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP3:%.+]] = icmp ule i64 [[IV3]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]]
  for (unsigned long long it = 2000; it >= 600; it-=400) {
// CHECK: [[SIMPLE_LOOP3_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400
// CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64*
//
// Linear start and step are used to calculate the current value of the linear variable.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]],
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1
// CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]]
// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]
    *g_ptr++ = 0.0;
// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]
// CHECK: store double{{.*}}[[GEP_VAL]]
    a[it + lin]++;
// CHECK: [[FLT_INC:%.+]] = fadd float
// CHECK-NEXT: store float [[FLT_INC]],
// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1
// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]
  }
// CHECK: [[SIMPLE_LOOP3_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
//
// Linear start and step are used to calculate the final values of the linear variables.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK: store i32 {{.+}}, i32* [[LIN_VAR]],
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK: store double* {{.*}}[[GLIN_VAR]]
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  #pragma omp for simd
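// 'it -= -4' steps the short counter up by 4, giving 4 iterations
// (it = 6, 10, 14, 18), so the IV is clamped to 3 and the recomputed counter
// value is truncated to i16.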
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV4:%[^,]+]],

// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP4:%.+]] = icmp sle i32 [[IV4]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]]
  for (short it = 6; it <= 20; it-=-4) {
// CHECK: [[SIMPLE_LOOP4_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]]
// CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16
// CHECK-NEXT: store i16 [[LC_IT_3]], i16*

// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1
// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]
  }
// CHECK: [[SIMPLE_LOOP4_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  #pragma omp for simd
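// The unsigned char counter runs from 'z' (122) down to 'a' (97), i.e. 26
// iterations, so the IV is clamped to 25 and the counter is recomputed as
// 122 - IV and truncated to i8.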
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 25
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 25, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV5:%[^,]+]],

// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP5:%.+]] = icmp sle i32 [[IV5]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]]
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
// CHECK: [[SIMPLE_LOOP5_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1
// CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]]
// CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8
// CHECK-NEXT: store i8 [[LC_IT_2]], i8*

// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1
// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]
  }
// CHECK: [[SIMPLE_LOOP5_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

// CHECK-NOT: mul i32 %{{.+}}, 10
  #pragma omp for simd
  for (unsigned i=100; i<10; i+=10) {
  }
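// The loop above has a zero trip count (100 < 10 is already false), so no body
// is emitted and, in particular, no multiplication by the step of 10 appears;
// the CHECK-NOT above guards against that.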

  int A;
  #pragma omp parallel
  {
  // CHECK: store i32 -1, i32* [[A:%.+]],
  A = -1;
  #pragma omp for simd lastprivate(A)
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV7:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]]
// CHECK: [[SIMD_LOOP7_COND]]
// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP7:%.+]] = icmp sle i64 [[IV7]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP7_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]
// CHECK-NEXT: [[CONV:%.+]] = trunc i64 [[LC_VAL]] to i32
// CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]],
    A = i;
// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1
// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]
  }
// CHECK: [[SIMPLE_LOOP7_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
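// Lastprivate copy-out: the flag set by the runtime for the thread that ran
// the last iteration is tested, and only then is the private copy of A stored
// back to the original variable.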
// CHECK: load i32, i32*
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: br i1 %{{.+}}, label
// CHECK: [[A_PRIV_VAL:%.+]] = load i32, i32* [[A_PRIV]],
// CHECK-NEXT: store i32 [[A_PRIV_VAL]], i32* %{{.+}},
// CHECK-NEXT: br label
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
  }
  int R;
  #pragma omp parallel
  {
  // CHECK: store i32 -1, i32* [[R:%[^,]+]],
  R = -1;
// CHECK: store i32 1, i32* [[R_PRIV:%[^,]+]],
  #pragma omp for simd reduction(*:R)
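// For reduction(*:R) the private copy is initialized to 1, the identity for
// multiplication (the store of 1 checked above); after the loop,
// __kmpc_reduce/__kmpc_end_reduce combine it into the original R with a multiply.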
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV8:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP8_COND:[^,]+]]
// CHECK: [[SIMD_LOOP8_COND]]
// CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP8:%.+]] = icmp sle i64 [[IV8]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP8]], label %[[SIMPLE_LOOP8_BODY:.+]], label %[[SIMPLE_LOOP8_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP8_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV8_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]
// CHECK: store i32 %{{.+}}, i32* [[R_PRIV]],
    R *= i;
// CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[ADD8_2:%.+]] = add nsw i64 [[IV8_2]], 1
// CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]]
  }
// CHECK: [[SIMPLE_LOOP8_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call i32 @__kmpc_reduce(
// CHECK: [[R_PRIV_VAL:%.+]] = load i32, i32* [[R_PRIV]],
// CHECK: [[RED:%.+]] = mul nsw i32 %{{.+}}, [[R_PRIV_VAL]]
// CHECK-NEXT: store i32 [[RED]], i32* %{{.+}},
// CHECK-NEXT: call void @__kmpc_end_reduce(
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
  }
}

template <class T, unsigned K> T tfoo(T a) { return a + K; }

template <typename T, unsigned N>
int templ1(T a, T *z) {
  #pragma omp for simd collapse(N) schedule(simd: static, N)
  for (int i = 0; i < N * 2; i++) {
    for (long long j = 0; j < (N + N + N + N); j += 2) {
      z[i + j] = a + tfoo<T, N>(i + j);
    }
  }
  return 0;
}

// Instantiation of templ1<float,2>
// CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}})
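// For templ1<float,2> the collapsed iteration space is 4 * 4 = 16 iterations
// (i = 0..3, j = 0, 2, 4, 6), so the IV is clamped to 15 and i and j are
// recovered below with sdiv/srem by 4. schedule(simd: static, N) is passed to
// the runtime as schedule kind 45, which appears to be the static schedule
// with the simd modifier, with a chunk of 2.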
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 45, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 2)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 15
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 15, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[T1_OMP_IV:%[^,]+]],

// ...
// CHECK: icmp sle i64
// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP1:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]]
// CHECK: [[T1_BODY]]
// Updates of loop counters i and j:
// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]
// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
// CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
// CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
// CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
// CHECK-NEXT: store i32 [[I_2]], i32*
// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]
// CHECK-NEXT: [[J_1:%.+]] = srem i64 [[IV2]], 4
// CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2
// CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]]
// CHECK-NEXT: store i64 [[J_2_ADD0]], i64*
// simd.for.inc:
// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]
// CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1
// CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]]
// CHECK-NEXT: br label {{%.+}}
// CHECK: [[T1_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret i32 0
//
void inst_templ1() {
  float a;
  float z[100];
  templ1<float,2> (a, z);
}


typedef int MyIdx;

class IterDouble {
  double *Ptr;
public:
  IterDouble operator++ () const {
    IterDouble n;
    n.Ptr = Ptr + 1;
    return n;
  }
  bool operator < (const IterDouble &that) const {
    return Ptr < that.Ptr;
  }
  double & operator *() const {
    return *Ptr;
  }
  MyIdx operator - (const IterDouble &that) const {
    return (MyIdx) (Ptr - that.Ptr);
  }
  IterDouble operator + (int Delta) {
    IterDouble re;
    re.Ptr = Ptr + Delta;
    return re;
  }

  ///~IterDouble() {}
};

// CHECK-LABEL: define {{.*void}} @{{.*}}iter_simple{{.*}}
void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) {
//
// Calculate the number of iterations before the loop body.
// CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
// CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
// CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
// CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
// CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
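// The trip count is the iterator distance ib - ia, computed through the
// operator- invoke above; OMP_LAST_IT stores that count minus 1 (the last
// normalized IV), which is later used to clamp the runtime upper bound.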
  #pragma omp for simd

// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[IT_OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]]
  for (IterDouble i = ia; i < ib; ++i) {
// CHECK: [[IT_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]
// Call of operator+ (i, IV).
// CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}}
// ... loop body ...
   *i = *ic * 0.5;
// Float multiply and save result.
// CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01
// CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}}
// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]]
   ++ic;
//
// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]
// br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]]
  }
// CHECK: [[IT_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
}


// CHECK-LABEL: define {{.*void}} @{{.*}}collapsed{{.*}}
void collapsed(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  unsigned j; // middle loop counter, leads to unsigned icmp in loop header.
  // k declared in the loop init below
  short l; // inner loop counter
// CHECK: call void @__kmpc_for_static_init_4u(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i32 [[UB_VAL]], 119
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 119, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],
//
  #pragma omp for simd collapse(4)
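// collapse(4) flattens the 2 * 3 * 4 * 5 = 120 iterations into a single IV
// (0..119, hence the clamp to 119 above); i, j, k and l are recovered below
// with udiv/urem by 60, 20 and 5, the products of the inner trip counts. The
// comparisons are unsigned because the middle counter j is unsigned.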

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 2u; j < 5u; j++) // 3 iterations
      for (int k = 3; k <= 6; k++) // 4 iterations
        for (l = 4; l < 9; ++l) // 5 iterations
        {
// CHECK: [[COLL1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]
// Calculation of the loop counter values.
// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
// CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
// CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
// CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
// CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
// CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
// CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
// CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_K_3]], i32* [[LC_K:.+]]
// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_L_1:%.+]] = urem i32 [[IV1_4]], 5
// CHECK-NEXT: [[CALC_L_1_MUL1:%.+]] = mul i32 [[CALC_L_1]], 1
// CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[CALC_L_1_MUL1]]
// CHECK-NEXT: [[CALC_L_3:%.+]] = trunc i32 [[CALC_L_2]] to i16
// CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]
    float res = b[j] * c[k];
    a[i] = res * d[l];
// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]
// br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]]
// CHECK: [[COLL1_END]]
  }
// i,j,l are updated; k is not updated.
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: br i1
// CHECK: store i32 3, i32*
// CHECK-NEXT: store i32 5,
// CHECK-NEXT: store i32 7,
// CHECK-NEXT: store i16 9, i16*
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
}

extern char foo();
extern double globalfloat;

// CHECK-LABEL: define {{.*void}} @{{.*}}widened{{.*}}
void widened(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  short j; // inner loop counter
  globalfloat = 1.0;
  int localint = 1;
// CHECK: store double {{.+}}, double* [[GLOBALFLOAT:@.+]]
// The counter is widened to 64 bits.
// CHECK:     [[MUL:%.+]] = mul nsw i64 2, %{{.+}}
// CHECK-NEXT: [[SUB:%.+]] = sub nsw i64 [[MUL]], 1
// CHECK-NEXT: store i64 [[SUB]], i64* [[OMP_LAST_IT:%[^,]+]],
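// The collapsed trip count 2 * foo() is only known at run time, so it is
// computed in 64 bits; OMP_LAST_IT holds 2 * foo() - 1, the index of the last
// iteration.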
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV:%[^,]+]],
//
  #pragma omp for simd collapse(2) private(globalfloat, localint)

// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 0; j < foo(); j++) // foo() iterations
  {
// CHECK: [[WIDE1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]
// Calculation of the loop counter values...
// CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]]
// ... loop body ...
//
// Here we expect a store into the private double var, not the global one.
// CHECK-NOT: store double {{.+}}, double* [[GLOBALFLOAT]]
    globalfloat = (float)j/i;
    float res = b[j] * c[j];
// Store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]
    a[i] = res * d[i];
// Then there is a store into the private var localint:
// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]
    localint = (int)j;
// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1
// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]
//
// br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]]
// CHECK: [[WIDE1_END]]
  }
// i and j are updated.
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i16
//
// Here we expect a store into the original localint, not its privatized version.
// CHECK-NOT: store i32 {{.+}}, i32* [[LOCALINT]]
  localint = (int)j;
// CHECK: ret void
}

// TERM_DEBUG-LABEL: bar
int bar() { return 0; }

// TERM_DEBUG-LABEL: parallel_simd
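// With exceptions enabled, the call to bar() in the loop body is emitted as an
// invoke whose unwind edge leads to a landing pad that calls
// __clang_call_terminate; the TERM_DEBUG checks below verify this and the
// debug location, and that no extra __kmpc_global_thread_num calls appear.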
void parallel_simd(float *a) {
#pragma omp parallel
#pragma omp for simd
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     invoke i32 {{.*}}bar{{.*}}()
  // TERM_DEBUG:     unwind label %[[TERM_LPAD:.+]],
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG:     [[TERM_LPAD]]
  // TERM_DEBUG:     call void @__clang_call_terminate
  // TERM_DEBUG:     unreachable
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += bar();
}
// TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],
#endif // HEADER
