1// RUN: %clang_cc1 -emit-llvm -triple x86_64 -O3 -o %t.opt.ll %s \
2// RUN:   -fdump-record-layouts > %t.dump.txt
3// RUN: FileCheck -check-prefix=CHECK-RECORD < %t.dump.txt %s
4// RUN: FileCheck -check-prefix=CHECK-OPT < %t.opt.ll %s
5
6/****/
7
// Check that we don't read off the end of a packed 24-bit structure.
9// PR6176
10
11// CHECK-RECORD: *** Dumping IRgen Record Layout
12// CHECK-RECORD: Record: RecordDecl{{.*}}s0
13// CHECK-RECORD: Layout: <CGRecordLayout
14// CHECK-RECORD:   LLVMType:%struct.s0 = type { [3 x i8] }
15// CHECK-RECORD:   IsZeroInitializable:1
16// CHECK-RECORD:   BitFields:[
17// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageAlignment:1>
// Packed struct holding a single 24-bit bit-field: sizeof must be exactly
// 3 bytes, and codegen must not read or write a 4th byte past the end
// (PR6176; the CHECK-RECORD lines above pin StorageSize:24).
struct __attribute((packed)) s0 {
  int f0 : 24;
};

struct s0 g0 = { 0xdeadbeef };

// Plain read of the bit-field. The negative-size array is a compile-time
// assertion that sizeof(struct s0) == 3.
int f0_load(struct s0 *a0) {
  int size_check[sizeof(struct s0) == 3 ? 1 : -1];
  return a0->f0;
}
// Assign through the bit-field; the expression's value is the stored value.
int f0_store(struct s0 *a0) {
  return (a0->f0 = 1);
}
// Read-modify-write through the bit-field.
int f0_reload(struct s0 *a0) {
  return (a0->f0 += 1);
}
34
35// CHECK-OPT-LABEL: define i64 @test_0()
36// CHECK-OPT:  ret i64 1
37// CHECK-OPT: }
// Exercise the s0 accessors on a local copy; at -O3 the whole function must
// constant-fold (CHECK-OPT above pins the folded result to 1).
unsigned long long test_0() {
  struct s0 g0 = { 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g0.f0;  // value before the stores
  res ^= f0_load(&g0) ^ f0_store(&g0) ^ f0_reload(&g0);
  res ^= g0.f0;  // value after the stores
  return res;
}
46
47/****/
48
49// PR5591
50
51// CHECK-RECORD: *** Dumping IRgen Record Layout
52// CHECK-RECORD: Record: RecordDecl{{.*}}s1
53// CHECK-RECORD: Layout: <CGRecordLayout
54// CHECK-RECORD:   LLVMType:%struct.s1 = type { [3 x i8] }
55// CHECK-RECORD:   IsZeroInitializable:1
56// CHECK-RECORD:   BitFields:[
57// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageAlignment:1>
58// CHECK-RECORD:     <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageAlignment:1>
59
#pragma pack(push)
#pragma pack(1)
// Two signed 10-bit fields sharing a 3-byte storage unit (PR5591); both
// #pragma pack(1) and the packed attribute are applied to the record.
struct __attribute((packed)) s1 {
  signed f0 : 10;
  signed f1 : 10;
};
#pragma pack(pop)

struct s1 g1 = { 0xdeadbeef, 0xdeadbeef };

// Read the second field. The negative-size array is a compile-time
// assertion that sizeof(struct s1) == 3.
int f1_load(struct s1 *a0) {
  int size_check[sizeof(struct s1) == 3 ? 1 : -1];
  return a0->f1;
}
// Assign through f1; the expression's value is the stored value.
int f1_store(struct s1 *a0) {
  return (a0->f1 = 1234);
}
// Read-modify-write through f1.
int f1_reload(struct s1 *a0) {
  return (a0->f1 += 1234);
}
80
81// CHECK-OPT-LABEL: define i64 @test_1()
82// CHECK-OPT:  ret i64 210
83// CHECK-OPT: }
// Exercise the s1 accessors on a local copy; at -O3 this must constant-fold
// (CHECK-OPT above pins the folded result to 210).
unsigned long long test_1() {
  struct s1 g1 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g1.f0 ^ g1.f1;  // values before the stores
  res ^= f1_load(&g1) ^ f1_store(&g1) ^ f1_reload(&g1);
  res ^= g1.f0 ^ g1.f1;  // values after the stores
  return res;
}
92
93/****/
94
95// Check that we don't access beyond the bounds of a union.
96//
97// PR5567
98
99// CHECK-RECORD: *** Dumping IRgen Record Layout
100// CHECK-RECORD: Record: RecordDecl{{.*}}u2
101// CHECK-RECORD: Layout: <CGRecordLayout
102// CHECK-RECORD:   LLVMType:%union.u2 = type { i8 }
103// CHECK-RECORD:   IsZeroInitializable:1
104// CHECK-RECORD:   BitFields:[
105// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageAlignment:1>
106
// Packed union whose only member is a 3-bit field in a single byte
// (PR5567): accesses must stay within the 1-byte storage unit.
union __attribute__((packed)) u2 {
  unsigned long long f0 : 3;
};

union u2 g2 = { 0xdeadbeef };

// Plain read of the 3-bit field.
int f2_load(union u2 *a0) {
  return a0->f0;
}
// Assign through the field; the stored (3-bit-truncated) value is returned.
int f2_store(union u2 *a0) {
  return (a0->f0 = 1234);
}
// Read-modify-write through the field.
int f2_reload(union u2 *a0) {
  return (a0->f0 += 1234);
}
122
123// CHECK-OPT-LABEL: define i64 @test_2()
124// CHECK-OPT:  ret i64 2
125// CHECK-OPT: }
// Exercise the u2 accessors on a local copy; at -O3 this must constant-fold
// (CHECK-OPT above pins the folded result to 2).
unsigned long long test_2() {
  union u2 g2 = { 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g2.f0;  // value before the stores
  res ^= f2_load(&g2) ^ f2_store(&g2) ^ f2_reload(&g2);
  res ^= g2.f0;  // value after the stores
  return res;
}
134
135/***/
136
137// PR5039
138
// Bit-fields declared with long long type but only 32 bits wide (PR5039).
struct s3 {
  long long f0 : 32;
  long long f1 : 32;
};

struct s3 g3 = { 0xdeadbeef, 0xdeadbeef };

// Each accessor first stores 1 into f0 so the subsequent access starts
// from a known value.
int f3_load(struct s3 *a0) {
  a0->f0 = 1;
  return a0->f0;
}
int f3_store(struct s3 *a0) {
  a0->f0 = 1;
  return (a0->f0 = 1234);
}
int f3_reload(struct s3 *a0) {
  a0->f0 = 1;
  return (a0->f0 += 1234);
}
158
159// CHECK-OPT-LABEL: define i64 @test_3()
160// CHECK-OPT:  ret i64 -559039940
161// CHECK-OPT: }
// Exercise the s3 accessors on a local copy; at -O3 this must constant-fold
// (CHECK-OPT above pins the folded result to -559039940).
unsigned long long test_3() {
  struct s3 g3 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g3.f0 ^ g3.f1;  // values before the stores
  res ^= f3_load(&g3) ^ f3_store(&g3) ^ f3_reload(&g3);
  res ^= g3.f0 ^ g3.f1;  // values after the stores
  return res;
}
170
171/***/
172
173// This is a case where the bitfield access will straddle an alignment boundary
174// of its underlying type.
175
// f1 is packed onto f0 so its 28 bits straddle an alignment boundary of
// the underlying unsigned type (see comment above).
struct s4 {
  unsigned f0 : 16;
  unsigned f1 : 28 __attribute__ ((packed));
};

struct s4 g4 = { 0xdeadbeef, 0xdeadbeef };

// Read both fields.
int f4_load(struct s4 *a0) {
  return a0->f0 ^ a0->f1;
}
// Assign through both fields; each assignment yields the stored value.
int f4_store(struct s4 *a0) {
  return (a0->f0 = 1234) ^ (a0->f1 = 5678);
}
// Read-modify-write through both fields.
int f4_reload(struct s4 *a0) {
  return (a0->f0 += 1234) ^ (a0->f1 += 5678);
}
192
193// CHECK-OPT-LABEL: define i64 @test_4()
194// CHECK-OPT:  ret i64 4860
195// CHECK-OPT: }
// Exercise the s4 accessors on a local copy; at -O3 this must constant-fold
// (CHECK-OPT above pins the folded result to 4860).
unsigned long long test_4() {
  struct s4 g4 = { 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g4.f0 ^ g4.f1;  // values before the stores
  res ^= f4_load(&g4) ^ f4_store(&g4) ^ f4_reload(&g4);
  res ^= g4.f0 ^ g4.f1;  // values after the stores
  return res;
}
204
205/***/
206
// Mixed-width bit-fields: a 2-bit unsigned field next to two 1-bit _Bool
// fields.
struct s5 {
  unsigned f0 : 2;
  _Bool f1 : 1;
  _Bool f2 : 1;
};

struct s5 g5 = { 0xdeadbeef, 0xdeadbeef };

// Reads only f0 and f1; f2 is deliberately left untouched here.
int f5_load(struct s5 *a0) {
  return a0->f0 ^ a0->f1;
}
// Assign to all three fields; the _Bool fields normalize 0xF to 1.
int f5_store(struct s5 *a0) {
  return (a0->f0 = 0xF) ^ (a0->f1 = 0xF) ^ (a0->f2 = 0xF);
}
// Read-modify-write on all three fields.
int f5_reload(struct s5 *a0) {
  return (a0->f0 += 0xF) ^ (a0->f1 += 0xF) ^ (a0->f2 += 0xF);
}
224
225// CHECK-OPT-LABEL: define i64 @test_5()
226// CHECK-OPT:  ret i64 2
227// CHECK-OPT: }
// Exercise the s5 accessors on a local copy (all three fields initialized
// here); at -O3 this must constant-fold (CHECK-OPT above pins the result
// to 2).
unsigned long long test_5() {
  struct s5 g5 = { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g5.f0 ^ g5.f1 ^ g5.f2;  // values before the stores
  res ^= f5_load(&g5) ^ f5_store(&g5) ^ f5_reload(&g5);
  res ^= g5.f0 ^ g5.f1 ^ g5.f2;  // values after the stores
  return res;
}
236
237/***/
238
// A _Bool bit-field declared wider than one bit.
struct s6 {
  _Bool f0 : 2;
};

struct s6 g6 = { 0xF };

// Plain read of the _Bool field.
int f6_load(struct s6 *a0) {
  return a0->f0;
}
// Assign zero through the field and return the stored value.
int f6_store(struct s6 *a0) {
  return a0->f0 = 0x0;
}
// Read-modify-write through the field.
int f6_reload(struct s6 *a0) {
  return (a0->f0 += 0xF);
}
254
255// CHECK-OPT-LABEL: define zeroext i1 @test_6()
256// CHECK-OPT:  ret i1 true
257// CHECK-OPT: }
// Exercises only f6_load (no store/reload here); at -O3 this must
// constant-fold (CHECK-OPT above pins the folded result to true).
_Bool test_6() {
  struct s6 g6 = { 0xF };
  unsigned long long res = 0;
  res ^= g6.f0;       // value before the call
  res ^= f6_load(&g6);
  res ^= g6.f0;       // value after the call
  return res;
}
266
267/***/
268
269// Check that we compute the best alignment possible for each access.
270//
271// CHECK-RECORD: *** Dumping IRgen Record Layout
272// CHECK-RECORD: Record: RecordDecl{{.*}}s7
273// CHECK-RECORD: Layout: <CGRecordLayout
274// CHECK-RECORD:   LLVMType:%struct.s7 = type { i32, i32, i32, i8, i32, [12 x i8] }
275// CHECK-RECORD:   IsZeroInitializable:1
276// CHECK-RECORD:   BitFields:[
277// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageAlignment:4>
278// CHECK-RECORD:     <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageAlignment:16>
279
// Over-aligned record: verifies that each bit-field access gets the best
// alignment the layout allows (the CHECK-RECORD lines above pin the
// computed StorageAlignment for f0 and f1).
struct __attribute__((aligned(16))) s7 {
  int a;
  int b;
  int c;
  int f0 : 5;
  int f1 : 29;
};

// Plain read of the first (5-bit, signed) bit-field.
int f7_load(struct s7 *a0) {
  int value = a0->f0;
  return value;
}
289
290/***/
291
292// This is a case where we narrow the access width immediately.
293
// Packed record mixing char- and int-typed bit-fields with a plain char
// member; codegen narrows the access width immediately (see comment above).
struct __attribute__((packed)) s8 {
  char f0 : 4;
  char f1;
  int  f2 : 4;
  char f3 : 4;
};

struct s8 g8 = { 0xF };

// Read the three bit-fields (f1 is not a bit-field and is skipped).
int f8_load(struct s8 *a0) {
  return a0->f0 ^ a0 ->f2 ^ a0->f3;
}
// Assign 0xFD through each 4-bit field; each assignment yields the
// truncated stored value.
int f8_store(struct s8 *a0) {
  return (a0->f0 = 0xFD) ^ (a0->f2 = 0xFD) ^ (a0->f3 = 0xFD);
}
// Read-modify-write through each 4-bit field.
int f8_reload(struct s8 *a0) {
  return (a0->f0 += 0xFD) ^ (a0->f2 += 0xFD) ^ (a0->f3 += 0xFD);
}
312
313// CHECK-OPT-LABEL: define i32 @test_8()
314// CHECK-OPT:  ret i32 -3
315// CHECK-OPT: }
// Exercise the s8 accessors on a local copy (all four members initialized
// here); at -O3 this must constant-fold (CHECK-OPT above pins the folded
// result to -3).
unsigned test_8() {
  struct s8 g8 = { 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef };
  unsigned long long res = 0;
  res ^= g8.f0 ^ g8.f2 ^ g8.f3;  // values before the stores
  res ^= f8_load(&g8) ^ f8_store(&g8) ^ f8_reload(&g8);
  res ^= g8.f0 ^ g8.f2 ^ g8.f3;  // values after the stores
  return res;
}
324
325/***/
326
327// This is another case where we narrow the access width immediately.
328//
329// <rdar://problem/7893760>
330
// Eight packed 7-bit fields (56 bits total, 7 bytes); reading the last
// field must use a narrowed access that stays inside the record
// (<rdar://problem/7893760>).
struct __attribute__((packed)) s9 {
  unsigned f0 : 7;
  unsigned f1 : 7;
  unsigned f2 : 7;
  unsigned f3 : 7;
  unsigned f4 : 7;
  unsigned f5 : 7;
  unsigned f6 : 7;
  unsigned f7 : 7;
};

// Plain read of the last 7-bit field.
int f9_load(struct s9 *a0) {
  unsigned last = a0->f7;
  return (int)last;
}
345