bitfield.cpp revision 72d2dab6058467036df73a5f668036a519043e5b
1// RUN: %clang_cc1 -triple x86_64-unknown-unknown -verify -emit-llvm -o - %s | FileCheck %s
2//
3// Tests for bitfield access patterns in C++ with special attention to
4// conformance to C++11 memory model requirements.
5
namespace N1 {
  // Ensure that neither loads nor stores to bitfields are widened into
  // other memory locations. (PR13691)
  //
  // NOTE: We could potentially widen loads based on their alignment if we are
  // comfortable requiring that subsequent memory locations within the
  // alignment-widened load are not volatile.
  //
  // Layout note: 'b' lives in its own byte between 'a' and 'c'. Under the
  // C++11 memory model 'a', 'b', and 'c' are distinct memory locations, so
  // the access to 'b' must stay within a single i8 — widening it would
  // introduce a data race with concurrent accesses to 'a' or 'c'.
  struct S {
    char a;
    unsigned b : 1;
    char c;
  };
  // Load of the 1-bit field: a single i8 load, masked down to bit 0.
  unsigned read(S* s) {
    // CHECK: define i32 @_ZN2N14read
    // CHECK:   %[[ptr:.*]] = getelementptr inbounds %{{.*}}* %{{.*}}, i32 0, i32 1
    // CHECK:   %[[val:.*]] = load i8* %[[ptr]]
    // CHECK:   %[[and:.*]] = and i8 %[[val]], 1
    // CHECK:   %[[ext:.*]] = zext i8 %[[and]] to i32
    // CHECK:                 ret i32 %[[ext]]
    return s->b;
  }
  // Store to the 1-bit field: read-modify-write confined to the single i8
  // that holds 'b' (load, mask old bits, merge new bit, store).
  void write(S* s, unsigned x) {
    // CHECK: define void @_ZN2N15write
    // CHECK:   %[[ptr:.*]]     = getelementptr inbounds %{{.*}}* %{{.*}}, i32 0, i32 1
    // CHECK:   %[[x_trunc:.*]] = trunc i32 %{{.*}} to i8
    // CHECK:   %[[old:.*]]     = load i8* %[[ptr]]
    // CHECK:   %[[x_and:.*]]   = and i8 %[[x_trunc]], 1
    // CHECK:   %[[old_and:.*]] = and i8 %[[old]], -2
    // CHECK:   %[[new:.*]]     = or i8 %[[old_and]], %[[x_and]]
    // CHECK:                     store i8 %[[new]], i8* %[[ptr]]
    s->b = x;
  }
}
39
namespace N2 {
  // Do widen loads and stores to bitfields when those bitfields have padding
  // within the struct following them.
  //
  // Layout note: 'b' occupies 3 bytes and is followed by one byte of padding
  // before the (pointer-aligned) 'p'. Padding is not a separate memory
  // location, so a 4-byte (i32) access covering b plus the padding byte is
  // race-free and cheaper than a 3-byte (i24) access.
  struct S {
    unsigned b : 24;
    void *p;
  };
  // Widened load: one i32 load, masked down to the low 24 bits.
  unsigned read(S* s) {
    // CHECK: define i32 @_ZN2N24read
    // CHECK:   %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
    // CHECK:   %[[val:.*]] = load i32* %[[ptr]]
    // CHECK:   %[[and:.*]] = and i32 %[[val]], 16777215
    // CHECK:                 ret i32 %[[and]]
    return s->b;
  }
  // Widened store: i32 read-modify-write preserving the (unused) padding byte.
  void write(S* s, unsigned x) {
    // CHECK: define void @_ZN2N25write
    // CHECK:   %[[ptr:.*]]     = bitcast %{{.*}}* %{{.*}} to i32*
    // CHECK:   %[[old:.*]]     = load i32* %[[ptr]]
    // CHECK:   %[[x_and:.*]]   = and i32 %{{.*}}, 16777215
    // CHECK:   %[[old_and:.*]] = and i32 %[[old]], -16777216
    // CHECK:   %[[new:.*]]     = or i32 %[[old_and]], %[[x_and]]
    // CHECK:                     store i32 %[[new]], i32* %[[ptr]]
    s->b = x;
  }
}
66
namespace N3 {
  // Do widen loads and stores to bitfields through the trailing padding at the
  // end of a struct.
  //
  // Layout note: sizeof(S) is 4 (alignment of unsigned), so the byte after
  // the 24-bit field is tail padding inside the same object and can safely be
  // covered by an i32 access.
  struct S {
    unsigned b : 24;
  };
  // Widened load through the trailing padding byte, masked to 24 bits.
  unsigned read(S* s) {
    // CHECK: define i32 @_ZN2N34read
    // CHECK:   %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
    // CHECK:   %[[val:.*]] = load i32* %[[ptr]]
    // CHECK:   %[[and:.*]] = and i32 %[[val]], 16777215
    // CHECK:                 ret i32 %[[and]]
    return s->b;
  }
  // Widened i32 read-modify-write through the trailing padding byte.
  void write(S* s, unsigned x) {
    // CHECK: define void @_ZN2N35write
    // CHECK:   %[[ptr:.*]]     = bitcast %{{.*}}* %{{.*}} to i32*
    // CHECK:   %[[old:.*]]     = load i32* %[[ptr]]
    // CHECK:   %[[x_and:.*]]   = and i32 %{{.*}}, 16777215
    // CHECK:   %[[old_and:.*]] = and i32 %[[old]], -16777216
    // CHECK:   %[[new:.*]]     = or i32 %[[old_and]], %[[x_and]]
    // CHECK:                     store i32 %[[new]], i32* %[[ptr]]
    s->b = x;
  }
}
92
namespace N4 {
  // Do NOT widen loads and stores to bitfields into padding at the end of
  // a class which might end up with members inside of it when inside a derived
  // class.
  //
  // Because Base is non-POD (it has a virtual destructor), a derived class
  // may allocate its own members inside Base's tail padding — so that padding
  // byte is potentially someone else's memory location and both the load and
  // the store must use a narrow i24 access.
  struct Base {
    virtual ~Base() {}

    unsigned b : 24;
  };
  // Imagine some other translation unit introduces:
#if 0
  struct Derived : public Base {
    char c;
  };
#endif
  // Narrow i24 load — see FIXME below about potentially widening loads.
  unsigned read(Base* s) {
    // FIXME: We should widen this load as long as the function isn't being
    // instrumented by thread-sanitizer.
    //
    // CHECK: define i32 @_ZN2N44read
    // CHECK:   %[[ptr:.*]] = bitcast {{.*}}* %{{.*}} to i24*
    // CHECK:   %[[val:.*]] = load i24* %[[ptr]]
    // CHECK:   %[[ext:.*]] = zext i24 %[[val]] to i32
    // CHECK:                 ret i32 %[[ext]]
    return s->b;
  }
  // Narrow i24 store: must not touch the tail-padding byte, since a derived
  // class (like the hypothetical Derived above) may have placed data there.
  void write(Base* s, unsigned x) {
    // CHECK: define void @_ZN2N45write
    // CHECK:   %[[ptr:.*]] = bitcast {{.*}}* %{{.*}} to i24*
    // CHECK:   %[[new:.*]] = trunc i32 %{{.*}} to i24
    // CHECK:                 store i24 %[[new]], i24* %[[ptr]]
    s->b = x;
  }
}
127
namespace N5 {
  // Widen through padding at the end of a struct even if that struct
  // participates in a union with another struct which has a separate field in
  // that location. The reasoning is that if the operation is storing to that
  // member of the union, it must be the active member, and thus we can write
  // through the padding. If it is a load, it might be a load of a common
  // prefix through a non-active member, but in such a case the extra bits
  // loaded are masked off anyways.
  //
  // Layout note: X::c occupies the byte that is tail padding in Y, so a
  // widened i32 access to y.b overlaps x.c's storage — safe only by the
  // active-member argument above.
  union U {
    struct X { unsigned b : 24; char c; } x;
    struct Y { unsigned b : 24; } y;
  };
  // Widened i32 load of y.b; any bits of x.c read are masked away.
  unsigned read(U* u) {
    // CHECK: define i32 @_ZN2N54read
    // CHECK:   %[[ptr:.*]] = bitcast %{{.*}}* %{{.*}} to i32*
    // CHECK:   %[[val:.*]] = load i32* %[[ptr]]
    // CHECK:   %[[and:.*]] = and i32 %[[val]], 16777215
    // CHECK:                 ret i32 %[[and]]
    return u->y.b;
  }
  // Widened i32 read-modify-write to y.b: writing implies y is the active
  // member, so scribbling over its tail padding is permitted.
  void write(U* u, unsigned x) {
    // CHECK: define void @_ZN2N55write
    // CHECK:   %[[ptr:.*]]     = bitcast %{{.*}}* %{{.*}} to i32*
    // CHECK:   %[[old:.*]]     = load i32* %[[ptr]]
    // CHECK:   %[[x_and:.*]]   = and i32 %{{.*}}, 16777215
    // CHECK:   %[[old_and:.*]] = and i32 %[[old]], -16777216
    // CHECK:   %[[new:.*]]     = or i32 %[[old_and]], %[[x_and]]
    // CHECK:                     store i32 %[[new]], i32* %[[ptr]]
    u->y.b = x;
  }
}
159
namespace N6 {
  // Zero-length bitfields partition the memory locations of bitfields for the
  // purposes of the memory model. That means stores must not span zero-length
  // bitfields and loads may only span them when we are not instrumenting with
  // thread sanitizer.
  // FIXME: We currently don't widen loads even without thread sanitizer, even
  // though we could.
  //
  // Layout note: the 'unsigned char : 0' splits b1 and b2 into two distinct
  // memory locations, so each is accessed with its own narrow load/store
  // (i24 for b1, i8 for b2).
  struct S {
    unsigned b1 : 24;
    unsigned char : 0;
    unsigned char b2 : 8;
  };
  // Two separate narrow loads, one per memory location, then summed.
  unsigned read(S* s) {
    // CHECK: define i32 @_ZN2N64read
    // CHECK:   %[[ptr1:.*]] = bitcast {{.*}}* %{{.*}} to i24*
    // CHECK:   %[[val1:.*]] = load i24* %[[ptr1]]
    // CHECK:   %[[ext1:.*]] = zext i24 %[[val1]] to i32
    // CHECK:   %[[ptr2:.*]] = getelementptr inbounds {{.*}}* %{{.*}}, i32 0, i32 1
    // CHECK:   %[[val2:.*]] = load i8* %[[ptr2]]
    // CHECK:   %[[ext2:.*]] = zext i8 %[[val2]] to i32
    // CHECK:   %[[add:.*]]  = add nsw i32 %[[ext1]], %[[ext2]]
    // CHECK:                  ret i32 %[[add]]
    return s->b1 + s->b2;
  }
  // Two separate narrow stores — a store must never span the zero-length
  // bitfield boundary between b1 and b2.
  void write(S* s, unsigned x) {
    // CHECK: define void @_ZN2N65write
    // CHECK:   %[[ptr1:.*]] = bitcast {{.*}}* %{{.*}} to i24*
    // CHECK:   %[[new1:.*]] = trunc i32 %{{.*}} to i24
    // CHECK:                  store i24 %[[new1]], i24* %[[ptr1]]
    // CHECK:   %[[new2:.*]] = trunc i32 %{{.*}} to i8
    // CHECK:   %[[ptr2:.*]] = getelementptr inbounds {{.*}}* %{{.*}}, i32 0, i32 1
    // CHECK:                  store i8 %[[new2]], i8* %[[ptr2]]
    s->b1 = x;
    s->b2 = x;
  }
}
196