// RUN: %clang_cc1 -std=c++11 -fsanitize=signed-integer-overflow,integer-divide-by-zero,float-divide-by-zero,shift,unreachable,return,vla-bound,alignment,null,vptr,object-size,float-cast-overflow,bool,enum,array-bounds,function -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s

struct S {
  double d;
  int a, b;
  virtual int f();
};

struct T : S {};

// CHECK: @_Z17reference_binding
void reference_binding(int *p, S *q) {
  // C++ core issue 453: If an lvalue to which a reference is directly bound
  // designates neither an existing object or function of an appropriate type,
  // nor a region of storage of suitable size and alignment to contain an object
  // of the reference's type, the behavior is undefined.
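  //
  // (For example, calling reference_binding(nullptr, q) trips the null check
  // emitted below, and a pointer to an object statically known to be smaller
  // than sizeof(int) trips the object-size check.)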

  // CHECK: icmp ne {{.*}}, null

  // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64
  // CHECK-NEXT: icmp uge i64 %[[SIZE]], 4

  // CHECK: %[[PTRINT:.*]] = ptrtoint
  // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 3
  // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0
  int &r = *p;

  // A reference is not required to refer to an object within its lifetime.
  // CHECK-NOT: __ubsan_handle_dynamic_type_cache_miss
  S &r2 = *q;
}

// CHECK: @_Z13member_access
void member_access(S *p) {
  // (1a) Check 'p' is appropriately sized and aligned for member access.

  // CHECK: icmp ne {{.*}}, null

  // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64
  // CHECK-NEXT: icmp uge i64 %[[SIZE]], 24

  // CHECK: %[[PTRINT:.*]] = ptrtoint
  // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 7
  // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0

  // (1b) Check that 'p' actually points to an 'S'.

  // CHECK: %[[VPTRADDR:.*]] = bitcast {{.*}} to i64*
  // CHECK-NEXT: %[[VPTR:.*]] = load i64* %[[VPTRADDR]]
  //
  // hash_16_bytes:
  //
  // If this number changes, it indicates that either the mangled name of ::S
  // has changed, or that LLVM's hashing function has changed. The latter case
  // is OK if the hashing function is still stable.
  //
  // The two hash values are for 64- and 32-bit Clang binaries, respectively.
  // FIXME: We should produce a 64-bit value either way.
  //
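  // A sketch of the computation matched below (assuming LLVM's hash_16_bytes
  // is unchanged; its multiplier kMul = 0x9ddfea08eb382d69 prints as
  // -7070675565921424023 when interpreted as a signed i64):
  //   uint64_t a = (TypeHash ^ VPtr) * kMul;  a ^= (a >> 47);
  //   uint64_t b = (VPtr ^ a) * kMul;         b ^= (b >> 47);
  //   uint64_t Hash = b * kMul;
  //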
  // CHECK-NEXT: xor i64 {{-4030275160588942838|2562089159}}, %[[VPTR]]
  // CHECK-NEXT: mul i64 {{.*}}, -7070675565921424023
  // CHECK-NEXT: lshr i64 {{.*}}, 47
  // CHECK-NEXT: xor i64
  // CHECK-NEXT: xor i64 %[[VPTR]]
  // CHECK-NEXT: mul i64 {{.*}}, -7070675565921424023
  // CHECK-NEXT: lshr i64 {{.*}}, 47
  // CHECK-NEXT: xor i64
  // CHECK-NEXT: %[[HASH:.*]] = mul i64 {{.*}}, -7070675565921424023
  //
  // Check the hash against the table:
  //
  // CHECK-NEXT: %[[IDX:.*]] = and i64 %{{.*}}, 127
  // CHECK-NEXT: getelementptr inbounds [128 x i64]* @__ubsan_vptr_type_cache, i32 0, i64 %[[IDX]]
  // CHECK-NEXT: %[[CACHEVAL:.*]] = load i64*
  // CHECK-NEXT: icmp eq i64 %[[CACHEVAL]], %[[HASH]]
  // CHECK-NEXT: br i1

  // CHECK: call void @__ubsan_handle_dynamic_type_cache_miss({{.*}}, i64 %{{.*}}, i64 %[[HASH]])
  // CHECK-NOT: unreachable
  // CHECK: {{.*}}:

  // (2) Check 'p->b' is appropriately sized and aligned for a load.

  // FIXME: Suppress this in the trivial case of a member access, because we
  // know we've just checked the member access expression itself.

  // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64
  // CHECK-NEXT: icmp uge i64 %[[SIZE]], 4

  // CHECK: %[[PTRINT:.*]] = ptrtoint
  // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 3
  // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0
  int k = p->b;

  // (3a) Check 'p' is appropriately sized and aligned for a member function call.

  // CHECK: icmp ne {{.*}}, null

  // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64
  // CHECK-NEXT: icmp uge i64 %[[SIZE]], 24

  // CHECK: %[[PTRINT:.*]] = ptrtoint
  // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 7
  // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0

  // (3b) Check that 'p' actually points to an 'S'.

  // CHECK: load i64*
  // CHECK-NEXT: xor i64 {{-4030275160588942838|2562089159}},
  // [...]
  // CHECK: getelementptr inbounds [128 x i64]* @__ubsan_vptr_type_cache, i32 0, i64 %
  // CHECK: br i1
  // CHECK: call void @__ubsan_handle_dynamic_type_cache_miss({{.*}}, i64 %{{.*}}, i64 %{{.*}})
  // CHECK-NOT: unreachable
  // CHECK: {{.*}}:

  k = p->f();
}

// CHECK: @_Z12lsh_overflow
int lsh_overflow(int a, int b) {
  // CHECK: %[[INBOUNDS:.*]] = icmp ule i32 %[[RHS:.*]], 31
  // CHECK-NEXT: br i1 %[[INBOUNDS]]

  // CHECK: %[[SHIFTED_OUT_WIDTH:.*]] = sub nuw nsw i32 31, %[[RHS]]
  // CHECK-NEXT: %[[SHIFTED_OUT:.*]] = lshr i32 %[[LHS:.*]], %[[SHIFTED_OUT_WIDTH]]

  // This is present for C++11 but not for C: C++ core issue 1457 allows a '1'
  // to be shifted into the sign bit, but not out of it.
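  // For example, 1 << 31 passes this check (only the sign bit receives the
  // shifted-in 1), while 2 << 31 shifts a 1 bit out entirely and reaches the
  // handler.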
  // CHECK-NEXT: %[[SHIFTED_OUT_NOT_SIGN:.*]] = lshr i32 %[[SHIFTED_OUT]], 1

  // CHECK-NEXT: %[[NO_OVERFLOW:.*]] = icmp eq i32 %[[SHIFTED_OUT_NOT_SIGN]], 0

  // CHECK: %[[VALID:.*]] = phi i1 [ %[[INBOUNDS]], {{.*}} ], [ %[[NO_OVERFLOW]], {{.*}} ]
  // CHECK-NEXT: br i1 %[[VALID]]

  // CHECK: call void @__ubsan_handle_shift_out_of_bounds
  // CHECK-NOT: call void @__ubsan_handle_shift_out_of_bounds

  // CHECK: %[[RET:.*]] = shl i32 %[[LHS]], %[[RHS]]
  // CHECK-NEXT: ret i32 %[[RET]]
  return a << b;
}

// CHECK: @_Z9no_return
int no_return() {
  // CHECK:      call void @__ubsan_handle_missing_return(i8* bitcast ({{.*}}* @{{.*}} to i8*)) [[NR_NUW:#[0-9]+]]
  // CHECK-NEXT: unreachable
}

// CHECK: @_Z9sour_bool
bool sour_bool(bool *p) {
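  // Only 0 and 1 are valid value representations for a bool; loading any
  // other byte pattern through 'p' is diagnosed.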
  // CHECK: %[[OK:.*]] = icmp ule i8 {{.*}}, 1
  // CHECK: br i1 %[[OK]]
  // CHECK: call void @__ubsan_handle_load_invalid_value(i8* bitcast ({{.*}}), i64 {{.*}})
  return *p;
}

enum E1 { e1a = 0, e1b = 127 } e1;
enum E2 { e2a = -1, e2b = 64 } e2;
enum E3 { e3a = (1u << 31) - 1 } e3;

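// The -fsanitize=enum check compares a loaded enum value against the range of
// values representable in the smallest bit-field that can hold all of the
// enumerators: E1 fits in 7 unsigned bits (0..127), E2 needs a signed 8-bit
// range (-128..127), and E3 needs 31 unsigned bits (0..2147483647). The
// constants in the checks below follow from that.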
// CHECK: @_Z14bad_enum_value
int bad_enum_value() {
  // CHECK: %[[E1:.*]] = icmp ule i32 {{.*}}, 127
  // CHECK: br i1 %[[E1]]
  // CHECK: call void @__ubsan_handle_load_invalid_value(
  int a = e1;

  // CHECK: %[[E2HI:.*]] = icmp sle i32 {{.*}}, 127
  // CHECK: %[[E2LO:.*]] = icmp sge i32 {{.*}}, -128
  // CHECK: %[[E2:.*]] = and i1 %[[E2HI]], %[[E2LO]]
  // CHECK: br i1 %[[E2]]
  // CHECK: call void @__ubsan_handle_load_invalid_value(
  int b = e2;

  // CHECK: %[[E3:.*]] = icmp ule i32 {{.*}}, 2147483647
  // CHECK: br i1 %[[E3]]
  // CHECK: call void @__ubsan_handle_load_invalid_value(
  int c = e3;
  return a + b + c;
}

// CHECK: @_Z20bad_downcast_pointer
void bad_downcast_pointer(S *p) {
  // CHECK: %[[NONNULL:.*]] = icmp ne {{.*}}, null
  // CHECK: br i1 %[[NONNULL]],

  // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(
  // CHECK: %[[E1:.*]] = icmp uge i64 %[[SIZE]], 24
  // CHECK: %[[MISALIGN:.*]] = and i64 %{{.*}}, 7
  // CHECK: %[[E2:.*]] = icmp eq i64 %[[MISALIGN]], 0
  // CHECK: %[[E12:.*]] = and i1 %[[E1]], %[[E2]]
  // CHECK: br i1 %[[E12]],

  // CHECK: call void @__ubsan_handle_type_mismatch
  // CHECK: br label

  // CHECK: br i1 %{{.*}},

  // CHECK: call void @__ubsan_handle_dynamic_type_cache_miss
  // CHECK: br label
  (void) static_cast<T*>(p);
}

// CHECK: @_Z22bad_downcast_reference
void bad_downcast_reference(S &p) {
  // CHECK: %[[E1:.*]] = icmp ne {{.*}}, null
  // CHECK-NOT: br i1
  // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(
  // CHECK: %[[E2:.*]] = icmp uge i64 %[[SIZE]], 24
  // CHECK: %[[E12:.*]] = and i1 %[[E1]], %[[E2]]
  // CHECK: %[[MISALIGN:.*]] = and i64 %{{.*}}, 7
  // CHECK: %[[E3:.*]] = icmp eq i64 %[[MISALIGN]], 0
  // CHECK: %[[E123:.*]] = and i1 %[[E12]], %[[E3]]
  // CHECK: br i1 %[[E123]],

  // CHECK: call void @__ubsan_handle_type_mismatch
  // CHECK: br label

  // CHECK: br i1 %{{.*}},

  // CHECK: call void @__ubsan_handle_dynamic_type_cache_miss
  // CHECK: br label
  (void) static_cast<T&>(p);
}

// CHECK: @_Z11array_index
int array_index(const int (&a)[4], int n) {
  // CHECK: %[[K1_OK:.*]] = icmp ult i64 %{{.*}}, 4
  // CHECK: br i1 %[[K1_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  int k1 = a[n];

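  // Taking the address &a[n] (without loading) may validly point one past
  // the end of the array, so this bound check is 'ule 4' rather than 'ult 4'.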
  // CHECK: %[[R1_OK:.*]] = icmp ule i64 %{{.*}}, 4
  // CHECK: br i1 %[[R1_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  const int *r1 = &a[n];

  // CHECK: %[[K2_OK:.*]] = icmp ult i64 %{{.*}}, 8
  // CHECK: br i1 %[[K2_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  int k2 = ((const int(&)[8])a)[n];

  // CHECK: %[[K3_OK:.*]] = icmp ult i64 %{{.*}}, 4
  // CHECK: br i1 %[[K3_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  int k3 = n[a];

  return k1 + *r1 + k2 + k3;
}

// CHECK: @_Z17multi_array_index
int multi_array_index(int n, int m) {
  int arr[4][6];

  // CHECK: %[[IDX2_OK:.*]] = icmp ult i64 %{{.*}}, 6
  // CHECK: br i1 %[[IDX2_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(

  // CHECK: %[[IDX1_OK:.*]] = icmp ult i64 %{{.*}}, 4
  // CHECK: br i1 %[[IDX1_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  return arr[n][m];
}

// CHECK: @_Z11array_arith
int array_arith(const int (&a)[4], int n) {
  // CHECK: %[[K1_OK:.*]] = icmp ule i64 %{{.*}}, 4
  // CHECK: br i1 %[[K1_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  const int *k1 = a + n;

  // CHECK: %[[K2_OK:.*]] = icmp ule i64 %{{.*}}, 8
  // CHECK: br i1 %[[K2_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  const int *k2 = (const int(&)[8])a + n;

  return *k1 + *k2;
}

struct ArrayMembers {
  int a1[5];
  int a2[1];
};
// CHECK: @_Z18struct_array_index
int struct_array_index(ArrayMembers *p, int n) {
  // CHECK: %[[IDX_OK:.*]] = icmp ult i64 %{{.*}}, 5
  // CHECK: br i1 %[[IDX_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  return p->a1[n];
}

// CHECK: @_Z16flex_array_index
int flex_array_index(ArrayMembers *p, int n) {
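  // A trailing array of size 1 is treated like a flexible array member, so
  // its bound is not enforced.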
  // CHECK-NOT: call void @__ubsan_handle_out_of_bounds(
  return p->a2[n];
}

extern int incomplete[];
// CHECK: @_Z22incomplete_array_index
int incomplete_array_index(int n) {
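  // The bound of 'incomplete' is unknown, so there is nothing to check against.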
  // CHECK-NOT: call void @__ubsan_handle_out_of_bounds(
  return incomplete[n];
}

typedef __attribute__((ext_vector_type(4))) int V4I;
// CHECK: @_Z12vector_index
int vector_index(V4I v, int n) {
  // CHECK: %[[IDX_OK:.*]] = icmp ult i64 %{{.*}}, 4
  // CHECK: br i1 %[[IDX_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  return v[n];
}

// CHECK: @_Z12string_index
char string_index(int n) {
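  // The bound is 6 because the type of "Hello" is const char[6], including
  // the terminating '\0'.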
  // CHECK: %[[IDX_OK:.*]] = icmp ult i64 %{{.*}}, 6
  // CHECK: br i1 %[[IDX_OK]]
  // CHECK: call void @__ubsan_handle_out_of_bounds(
  return "Hello"[n];
}

class A // align=4
{
  int a1, a2, a3;
};

class B // align=8
{
  long b1, b2;
};

class C : public A, public B // align=16
{
  alignas(16) int c1;
};

// Make sure we check the alignment of the pointer after subtracting any
// offset. The pointer before subtraction doesn't need to be aligned for
// the destination type.

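// In this (x86_64) layout the B subobject lives at offset 16 within C: A
// occupies the first 12 bytes and B must be 8-aligned. That is why the
// downcasts below adjust the pointer by -16. A valid B* is only guaranteed to
// be 8-byte aligned, so it is the adjusted C* that must satisfy C's 16-byte
// alignment.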
// CHECK-LABEL: define void @_Z16downcast_pointerP1B(%class.B* %b)
void downcast_pointer(B *b) {
  (void) static_cast<C*>(b);
  // Alignment check from EmitTypeCheck(TCK_DowncastPointer, ...)
  // CHECK: [[SUB:%[.a-z0-9]*]] = getelementptr i8* {{.*}}, i64 -16
  // CHECK-NEXT: [[C:%[0-9]*]] = bitcast i8* [[SUB]] to %class.C*
  // Null check goes here
  // CHECK: [[FROM_PHI:%[0-9]*]] = phi %class.C* [ [[C]], {{.*}} ], {{.*}}
  // Objectsize check goes here
  // CHECK: [[C_INT:%[0-9]*]] = ptrtoint %class.C* [[FROM_PHI]] to i64
  // CHECK-NEXT: [[MASKED:%[0-9]*]] = and i64 [[C_INT]], 15
  // CHECK-NEXT: [[TEST:%[0-9]*]] = icmp eq i64 [[MASKED]], 0
  // AND the alignment test with the objectsize test.
  // CHECK-NEXT: [[AND:%[0-9]*]] = and i1 {{.*}}, [[TEST]]
  // CHECK-NEXT: br i1 [[AND]]
}

// CHECK-LABEL: define void @_Z18downcast_referenceR1B(%class.B* nonnull %b)
void downcast_reference(B &b) {
  (void) static_cast<C&>(b);
  // Alignment check from EmitTypeCheck(TCK_DowncastReference, ...)
  // CHECK:      [[SUB:%[.a-z0-9]*]] = getelementptr i8* {{.*}}, i64 -16
  // CHECK-NEXT: [[C:%[0-9]*]] = bitcast i8* [[SUB]] to %class.C*
  // Objectsize check goes here
  // CHECK:      [[C_INT:%[0-9]*]] = ptrtoint %class.C* [[C]] to i64
  // CHECK-NEXT: [[MASKED:%[0-9]*]] = and i64 [[C_INT]], 15
  // CHECK-NEXT: [[TEST:%[0-9]*]] = icmp eq i64 [[MASKED]], 0
  // AND the alignment test with the objectsize test.
  // CHECK-NEXT: [[AND:%[0-9]*]] = and i1 {{.*}}, [[TEST]]
  // CHECK-NEXT: br i1 [[AND]]
}

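// With -fsanitize=function, the callee is emitted with prefix data of type
// <{ i32, i8* }>: a magic signature word (1413876459 in this output) followed
// by a pointer to the RTTI object for the function's type. An indirect call
// first loads that word from just before the callee's entry point and checks
// it against the magic value; if it matches, the stored RTTI pointer is
// compared with the RTTI of the call's static type (@_ZTIFviE here) before
// the call proceeds.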
// CHECK-LABEL: @_Z22indirect_function_callPFviE({{.*}} prefix <{ i32, i8* }> <{ i32 1413876459, i8* bitcast ({ i8*, i8* }* @_ZTIFvPFviEE to i8*) }>
void indirect_function_call(void (*p)(int)) {
  // CHECK: [[PTR:%[0-9]*]] = bitcast void (i32)* {{.*}} to <{ i32, i8* }>*

  // Signature check
  // CHECK-NEXT: [[SIGPTR:%[0-9]*]] = getelementptr <{ i32, i8* }>* [[PTR]], i32 0, i32 0
  // CHECK-NEXT: [[SIG:%[0-9]*]] = load i32* [[SIGPTR]]
  // CHECK-NEXT: [[SIGCMP:%[0-9]*]] = icmp eq i32 [[SIG]], 1413876459
  // CHECK-NEXT: br i1 [[SIGCMP]]

  // RTTI pointer check
  // CHECK: [[RTTIPTR:%[0-9]*]] = getelementptr <{ i32, i8* }>* [[PTR]], i32 0, i32 1
  // CHECK-NEXT: [[RTTI:%[0-9]*]] = load i8** [[RTTIPTR]]
  // CHECK-NEXT: [[RTTICMP:%[0-9]*]] = icmp eq i8* [[RTTI]], bitcast ({ i8*, i8* }* @_ZTIFviE to i8*)
  // CHECK-NEXT: br i1 [[RTTICMP]]
  p(42);
}

namespace CopyValueRepresentation {
  // CHECK-LABEL: define {{.*}} @_ZN23CopyValueRepresentation2S3aSERKS0_
  // CHECK-NOT: call {{.*}} @__ubsan_handle_load_invalid_value
  // CHECK-LABEL: define {{.*}} @_ZN23CopyValueRepresentation2S4aSEOS0_
  // CHECK-NOT: call {{.*}} @__ubsan_handle_load_invalid_value
  // CHECK-LABEL: define {{.*}} @_ZN23CopyValueRepresentation2S5C2ERKS0_
  // CHECK-NOT: call {{.*}} __ubsan_handle_load_invalid_value
  // CHECK-LABEL: define {{.*}} @_ZN23CopyValueRepresentation2S2C2ERKS0_
  // CHECK: __ubsan_handle_load_invalid_value
  // CHECK-LABEL: define {{.*}} @_ZN23CopyValueRepresentation2S1C2ERKS0_
  // CHECK-NOT: call {{.*}} __ubsan_handle_load_invalid_value

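  // Each struct below pairs a class-type member with a trailing bool. When an
  // implicit copy or move operation copies the value representation wholesale
  // (e.g. via memcpy), the bool is never loaded as a bool and no
  // __ubsan_handle_load_invalid_value call should be emitted. S2's copy
  // constructor is the expected exception, presumably because ExprCopy's copy
  // constructor takes a default argument, so the members cannot be copied as
  // one block and the bool is loaded (and checked) individually.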
  struct CustomCopy { CustomCopy(); CustomCopy(const CustomCopy&); };
  struct S1 {
    CustomCopy CC;
    bool b;
  };
  void callee1(S1);
  void test1() {
    S1 s11;
    callee1(s11);
    S1 s12;
    s12 = s11;
  }

  static bool some_global_bool;
  struct ExprCopy {
    ExprCopy();
    ExprCopy(const ExprCopy&, bool b = some_global_bool);
  };
  struct S2 {
    ExprCopy EC;
    bool b;
  };
  void callee2(S2);
  void test2() {
    S2 s21;
    callee2(s21);
    S2 s22;
    s22 = s21;
  }

  struct CustomAssign { CustomAssign &operator=(const CustomAssign&); };
  struct S3 {
    CustomAssign CA;
    bool b;
  };
  void test3() {
    S3 x, y;
    x = y;
  }

  struct CustomMove {
    CustomMove();
    CustomMove(const CustomMove&&);
    CustomMove &operator=(const CustomMove&&);
  };
  struct S4 {
    CustomMove CM;
    bool b;
  };
  void test4() {
    S4 x, y;
    x = static_cast<S4&&>(y);
  }

  struct EnumCustomCopy {
    EnumCustomCopy();
    EnumCustomCopy(const EnumCustomCopy&);
  };
  struct S5 {
    EnumCustomCopy ECC;
    bool b;
  };
  void callee5(S5);
  void test5() {
    S5 s51;
    callee5(s51);
    S5 s52;
    s52 = s51;
  }
}

// CHECK: attributes [[NR_NUW]] = { noreturn nounwind }
477