microsoft-abi-member-pointers.cpp revision a06d585468ae9371eb46a69d6180c2a85e0f456e
// RUN: %clang_cc1 -fno-rtti -emit-llvm %s -o - -cxx-abi microsoft -triple=i386-pc-win32 | FileCheck %s
// FIXME: Test x86_64 member pointers when codegen no longer asserts on records
// with virtual bases.

struct B1 {
  void foo();
  int b;
};
struct B2 {
  int b2;
  void foo();
};
struct Single : B1 {
  void foo();
};
struct Multiple : B1, B2 {
  int m;
  void foo();
};
struct Virtual : virtual B1 {
  int v;
  void foo();
};

struct POD {
  int a;
  int b;
};

struct Polymorphic {
  virtual void myVirtual();
  int a;
  int b;
};

// This class uses the virtual inheritance model, yet its vbptr offset is not 0.
// We still use zero for the null field offset, despite it being a valid field
// offset.
struct NonZeroVBPtr : POD, Virtual {
  int n;
  void foo();
};
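// Roughly: POD's two ints sit at offsets 0 and 4 and Virtual's vbptr follows at
// offset 8, so a field offset of 0 (POD::a) is perfectly valid here.  The null
// member pointer checked below is still { field offset 0, vbtable offset -1 };
// the -1 in the vbtable offset field is what marks it as null.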

struct Unspecified;

// Check that we can lower the LLVM types and get the null initializers right.
int Single     ::*s_d_memptr;
int Polymorphic::*p_d_memptr;
int Multiple   ::*m_d_memptr;
int Virtual    ::*v_d_memptr;
int NonZeroVBPtr::*n_d_memptr;
int Unspecified::*u_d_memptr;
// CHECK: @"\01?s_d_memptr@@3PQSingle@@HA" = global i32 -1, align 4
// CHECK: @"\01?p_d_memptr@@3PQPolymorphic@@HA" = global i32 0, align 4
// CHECK: @"\01?m_d_memptr@@3PQMultiple@@HA" = global i32 -1, align 4
// CHECK: @"\01?v_d_memptr@@3PQVirtual@@HA" = global { i32, i32 }
// CHECK:   { i32 0, i32 -1 }, align 4
// CHECK: @"\01?n_d_memptr@@3PQNonZeroVBPtr@@HA" = global { i32, i32 }
// CHECK:   { i32 0, i32 -1 }, align 4
// CHECK: @"\01?u_d_memptr@@3PQUnspecified@@HA" = global { i32, i32, i32 }
// CHECK:   { i32 0, i32 0, i32 -1 }, align 4
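// A rough summary of the data member pointer shapes exercised above (field
// names illustrative):
//   single/multiple: i32 field offset; null is -1, or 0 for polymorphic
//                    classes, where offset 0 holds the vfptr and is never a
//                    field offset
//   virtual:         { i32 field offset, i32 vbtable offset }; null is { 0, -1 }
//   unspecified:     { i32 field offset, i32 vbptr offset, i32 vbtable offset };
//                    null is { 0, 0, -1 }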

void (Single  ::*s_f_memptr)();
void (Multiple::*m_f_memptr)();
void (Virtual ::*v_f_memptr)();
// CHECK: @"\01?s_f_memptr@@3P8Single@@AEXXZA" = global i8* null, align 4
// CHECK: @"\01?m_f_memptr@@3P8Multiple@@AEXXZA" = global { i8*, i32 } zeroinitializer, align 4
// CHECK: @"\01?v_f_memptr@@3P8Virtual@@AEXXZA" = global { i8*, i32, i32 } zeroinitializer, align 4
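// Function member pointers widen the same way (again a rough summary):
//   single:      i8* function pointer; a null pointer means null
//   multiple:    { i8* fn, i32 non-virtual this adjustment }
//   virtual:     { i8* fn, i32 non-virtual adjustment, i32 vbtable offset }
//   unspecified: { i8* fn, i32 non-virtual adjustment, i32 vbptr offset,
//                  i32 vbtable offset }
// and zeroinitializer is the null value for the aggregate forms.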

// We can define Unspecified after locking in the inheritance model.
struct Unspecified : Multiple, Virtual {
  void foo();
  int u;
};
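// Declaring Unspecified member pointers above, while the class was still
// incomplete, locked it into the most general (unspecified) model, which is why
// every Unspecified member pointer in this file keeps the three- or four-field
// form even though the completed class could have used the virtual model.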

// Test memptr emission in a constant expression.
namespace Const {
void (Single     ::*s_f_mp)() = &Single::foo;
void (Multiple   ::*m_f_mp)() = &B2::foo;
void (Virtual    ::*v_f_mp)() = &Virtual::foo;
void (Unspecified::*u_f_mp)() = &Unspecified::foo;
// CHECK: @"\01?s_f_mp@Const@@3P8Single@@AEXXZA" =
// CHECK:   global i8* bitcast ({{.*}} @"\01?foo@Single@@QAEXXZ" to i8*), align 4
// CHECK: @"\01?m_f_mp@Const@@3P8Multiple@@AEXXZA" =
// CHECK:   global { i8*, i32 } { i8* bitcast ({{.*}} @"\01?foo@B2@@QAEXXZ" to i8*), i32 4 }, align 4
// CHECK: @"\01?v_f_mp@Const@@3P8Virtual@@AEXXZA" =
// CHECK:   global { i8*, i32, i32 } { i8* bitcast ({{.*}} @"\01?foo@Virtual@@QAEXXZ" to i8*), i32 0, i32 0 }, align 4
// CHECK: @"\01?u_f_mp@Const@@3P8Unspecified@@AEXXZA" =
// CHECK:   global { i8*, i32, i32, i32 } { i8* bitcast ({{.*}} @"\01?foo@Unspecified@@QAEXXZ" to i8*), i32 0, i32 12, i32 0 }, align 4
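// The integer fields above fall out of the record layouts (roughly): the i32 4
// in m_f_mp is the offset of the B2 subobject inside Multiple (B1 occupies
// bytes 0-3), and the i32 12 in u_f_mp is Unspecified's vbptr offset (the
// Multiple subobject occupies bytes 0-11).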
}

namespace CastParam {
// This exercises ConstExprEmitter instead of ValueDecl::evaluateValue.  The
// extra reinterpret_cast for the parameter type requires more careful folding.
// FIXME: Or does it?  If reinterpret_casts are no-ops, we should be able to
// strip them in evaluateValue() and just proceed as normal with an APValue.
struct A {
  int a;
  void foo(A *p);
};
struct B { int b; };
struct C : B, A { int c; };

void (A::*ptr1)(void *) = (void (A::*)(void *)) &A::foo;
// CHECK: @"\01?ptr1@CastParam@@3P8A@1@AEXPAX@ZA" =
// CHECK:   global i8* bitcast (void ({{.*}})* @"\01?foo@A@CastParam@@QAEXPAU12@@Z" to i8*), align 4

// Try a reinterpret_cast followed by a memptr conversion.
void (C::*ptr2)(void *) = (void (C::*)(void *)) (void (A::*)(void *)) &A::foo;
// CHECK: @"\01?ptr2@CastParam@@3P8C@1@AEXPAX@ZA" =
// CHECK:   global { i8*, i32 } { i8* bitcast (void ({{.*}})* @"\01?foo@A@CastParam@@QAEXPAU12@@Z" to i8*), i32 4 }, align 4

void (C::*ptr3)(void *) = (void (C::*)(void *)) (void (A::*)(void *)) (void (A::*)(A *)) 0;
// CHECK: @"\01?ptr3@CastParam@@3P8C@1@AEXPAX@ZA" =
// CHECK:   global { i8*, i32 } zeroinitializer, align 4

struct D : C {
  virtual void isPolymorphic();
  int d;
};

// Try a cast that changes the inheritance model.  Null for D is 0, but null for
// C is -1.  We need the cast through 'long D::*' in order to hit the
// non-APValue path.
int C::*ptr4 = (int C::*) (int D::*) (long D::*) 0;
// CHECK: @"\01?ptr4@CastParam@@3PQC@1@HA" = global i32 -1, align 4

// MSVC rejects this but we accept it.
int C::*ptr5 = (int C::*) (long D::*) 0;
// CHECK: @"\01?ptr5@CastParam@@3PQC@1@HA" = global i32 -1, align 4
}

struct UnspecWithVBPtr;
int UnspecWithVBPtr::*forceUnspecWithVBPtr;
struct UnspecWithVBPtr : B1, virtual B2 {
  int u;
  void foo();
};

// Test emitting non-virtual member pointers in a non-constexpr setting.
void EmitNonVirtualMemberPointers() {
  void (Single     ::*s_f_memptr)() = &Single::foo;
  void (Multiple   ::*m_f_memptr)() = &Multiple::foo;
  void (Virtual    ::*v_f_memptr)() = &Virtual::foo;
  void (Unspecified::*u_f_memptr)() = &Unspecified::foo;
  void (UnspecWithVBPtr::*u2_f_memptr)() = &UnspecWithVBPtr::foo;
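// In the last store below, the i32 4 is (roughly) UnspecWithVBPtr's vbptr
// offset: B1 occupies bytes 0-3 and the vbptr for the virtual B2 base follows
// at offset 4.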
// CHECK: define void @"\01?EmitNonVirtualMemberPointers@@YAXXZ"() {{.*}} {
// CHECK:   alloca i8*, align 4
// CHECK:   alloca { i8*, i32 }, align 4
// CHECK:   alloca { i8*, i32, i32 }, align 4
// CHECK:   alloca { i8*, i32, i32, i32 }, align 4
// CHECK:   store i8* bitcast (void (%{{.*}}*)* @"\01?foo@Single@@QAEXXZ" to i8*), i8** %{{.*}}, align 4
// CHECK:   store { i8*, i32 }
// CHECK:     { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Multiple@@QAEXXZ" to i8*), i32 0 },
// CHECK:     { i8*, i32 }* %{{.*}}, align 4
// CHECK:   store { i8*, i32, i32 }
// CHECK:     { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Virtual@@QAEXXZ" to i8*), i32 0, i32 0 },
// CHECK:     { i8*, i32, i32 }* %{{.*}}, align 4
// CHECK:   store { i8*, i32, i32, i32 }
// CHECK:     { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Unspecified@@QAEXXZ" to i8*), i32 0, i32 12, i32 0 },
// CHECK:     { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   store { i8*, i32, i32, i32 }
// CHECK:     { i8* bitcast (void (%{{.*}}*)* @"\01?foo@UnspecWithVBPtr@@QAEXXZ" to i8*),
// CHECK:       i32 0, i32 4, i32 0 },
// CHECK:     { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   ret void
// CHECK: }
}

void podMemPtrs() {
  int POD::*memptr;
  memptr = &POD::a;
  memptr = &POD::b;
  if (memptr)
    memptr = 0;
// Check that member pointers use the right offsets and that null is -1.
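// POD::a is at offset 0 and POD::b at offset 4; since 0 is a legitimate field
// offset here, -1 has to serve as the null value.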
// CHECK:      define void @"\01?podMemPtrs@@YAXXZ"() {{.*}} {
// CHECK:        %[[memptr:.*]] = alloca i32, align 4
// CHECK-NEXT:   store i32 0, i32* %[[memptr]], align 4
// CHECK-NEXT:   store i32 4, i32* %[[memptr]], align 4
// CHECK-NEXT:   %[[memptr_val:.*]] = load i32* %[[memptr]], align 4
// CHECK-NEXT:   %{{.*}} = icmp ne i32 %[[memptr_val]], -1
// CHECK-NEXT:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
// CHECK:        store i32 -1, i32* %[[memptr]], align 4
// CHECK:        ret void
// CHECK:      }
}

void polymorphicMemPtrs() {
  int Polymorphic::*memptr;
  memptr = &Polymorphic::a;
  memptr = &Polymorphic::b;
  if (memptr)
    memptr = 0;
// Member pointers for polymorphic classes include the vtable slot in their
// offset and use 0 to represent null.
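// The vfptr occupies offset 0, so Polymorphic::a lands at 4 and Polymorphic::b
// at 8; no field can ever be at offset 0, which frees up 0 to encode null.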
// CHECK:      define void @"\01?polymorphicMemPtrs@@YAXXZ"() {{.*}} {
// CHECK:        %[[memptr:.*]] = alloca i32, align 4
// CHECK-NEXT:   store i32 4, i32* %[[memptr]], align 4
// CHECK-NEXT:   store i32 8, i32* %[[memptr]], align 4
// CHECK-NEXT:   %[[memptr_val:.*]] = load i32* %[[memptr]], align 4
// CHECK-NEXT:   %{{.*}} = icmp ne i32 %[[memptr_val]], 0
// CHECK-NEXT:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
// CHECK:        store i32 0, i32* %[[memptr]], align 4
// CHECK:        ret void
// CHECK:      }
}

bool nullTestDataUnspecified(int Unspecified::*mp) {
  return mp;
// CHECK: define zeroext i1 @"\01?nullTestDataUnspecified@@YA_NPQUnspecified@@H@Z"{{.*}} {
// CHECK:   %{{.*}} = load { i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   store { i32, i32, i32 } {{.*}} align 4
// CHECK:   %[[mp:.*]] = load { i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   %[[mp0:.*]] = extractvalue { i32, i32, i32 } %[[mp]], 0
// CHECK:   %[[cmp0:.*]] = icmp ne i32 %[[mp0]], 0
// CHECK:   %[[mp1:.*]] = extractvalue { i32, i32, i32 } %[[mp]], 1
// CHECK:   %[[cmp1:.*]] = icmp ne i32 %[[mp1]], 0
// CHECK:   %[[and0:.*]] = and i1 %[[cmp0]], %[[cmp1]]
// CHECK:   %[[mp2:.*]] = extractvalue { i32, i32, i32 } %[[mp]], 2
// CHECK:   %[[cmp2:.*]] = icmp ne i32 %[[mp2]], -1
// CHECK:   %[[and1:.*]] = and i1 %[[and0]], %[[cmp2]]
// CHECK:   ret i1 %[[and1]]
// CHECK: }
}

bool nullTestFunctionUnspecified(void (Unspecified::*mp)()) {
  return mp;
// CHECK: define zeroext i1 @"\01?nullTestFunctionUnspecified@@YA_NP8Unspecified@@AEXXZ@Z"{{.*}} {
// CHECK:   %{{.*}} = load { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   store { i8*, i32, i32, i32 } {{.*}} align 4
// CHECK:   %[[mp:.*]] = load { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   %[[mp0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[mp]], 0
// CHECK:   %[[cmp0:.*]] = icmp ne i8* %[[mp0]], null
// CHECK:   ret i1 %[[cmp0]]
// CHECK: }
}

int loadDataMemberPointerVirtual(Virtual *o, int Virtual::*memptr) {
  return o->*memptr;
// Test that we can unpack this aggregate member pointer and use it to load the
// member it points to.
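// Conceptually (matching the IR below, names illustrative):
//   char *vbptr = (char *)o;                    // vbptr is at offset 0
//   int vbase_offset = *(int *)(*(char **)vbptr + memptr.vbtable_offset);
//   return *(int *)(vbptr + vbase_offset + memptr.field_offset);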
// CHECK: define i32 @"\01?loadDataMemberPointerVirtual@@YAHPAUVirtual@@PQ1@H@Z"{{.*}} {
// CHECK:   %[[o:.*]] = load %{{.*}}** %{{.*}}, align 4
// CHECK:   %[[memptr:.*]] = load { i32, i32 }* %{{.*}}, align 4
// CHECK:   %[[memptr0:.*]] = extractvalue { i32, i32 } %[[memptr:.*]], 0
// CHECK:   %[[memptr1:.*]] = extractvalue { i32, i32 } %[[memptr:.*]], 1
// CHECK:   %[[v6:.*]] = bitcast %{{.*}}* %[[o]] to i8*
// CHECK:   %[[vbptr:.*]] = getelementptr inbounds i8* %[[v6]], i32 0
// CHECK:   %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i8**
// CHECK:   %[[vbtable:.*]] = load i8** %[[vbptr_a:.*]]
// CHECK:   %[[v7:.*]] = getelementptr inbounds i8* %[[vbtable]], i32 %[[memptr1]]
// CHECK:   %[[v8:.*]] = bitcast i8* %[[v7]] to i32*
// CHECK:   %[[vbase_offs:.*]] = load i32* %[[v8]]
// CHECK:   %[[v10:.*]] = getelementptr inbounds i8* %[[vbptr]], i32 %[[vbase_offs]]
// CHECK:   %[[offset:.*]] = getelementptr inbounds i8* %[[v10]], i32 %[[memptr0]]
// CHECK:   %[[v11:.*]] = bitcast i8* %[[offset]] to i32*
// CHECK:   %[[v12:.*]] = load i32* %[[v11]]
// CHECK:   ret i32 %[[v12]]
// CHECK: }
}

int loadDataMemberPointerUnspecified(Unspecified *o, int Unspecified::*memptr) {
  return o->*memptr;
// Test that we can unpack this aggregate member pointer and use it to load the
// member it points to.
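// Conceptually (matching the IR below, names illustrative):
//   char *base = (char *)o;
//   if (memptr.vbtable_offset != 0) {
//     char *vbptr = base + memptr.vbptr_offset;
//     int vbase_offset = *(int *)(*(char **)vbptr + memptr.vbtable_offset);
//     base = vbptr + vbase_offset;
//   }
//   return *(int *)(base + memptr.field_offset);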
// CHECK: define i32 @"\01?loadDataMemberPointerUnspecified@@YAHPAUUnspecified@@PQ1@H@Z"{{.*}} {
// CHECK:   %[[o:.*]] = load %{{.*}}** %{{.*}}, align 4
// CHECK:   %[[memptr:.*]] = load { i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   %[[memptr0:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 0
// CHECK:   %[[memptr1:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 1
// CHECK:   %[[memptr2:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 2
// CHECK:   %[[base:.*]] = bitcast %{{.*}}* %[[o]] to i8*
// CHECK:   %[[is_vbase:.*]] = icmp ne i32 %[[memptr2]], 0
// CHECK:   br i1 %[[is_vbase]], label %[[vadjust:.*]], label %[[skip:.*]]
//
// CHECK: [[vadjust]]
// CHECK:   %[[vbptr:.*]] = getelementptr inbounds i8* %[[base]], i32 %[[memptr1]]
// CHECK:   %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i8**
// CHECK:   %[[vbtable:.*]] = load i8** %[[vbptr_a:.*]]
// CHECK:   %[[v7:.*]] = getelementptr inbounds i8* %[[vbtable]], i32 %[[memptr2]]
// CHECK:   %[[v8:.*]] = bitcast i8* %[[v7]] to i32*
// CHECK:   %[[vbase_offs:.*]] = load i32* %[[v8]]
// CHECK:   %[[base_adj:.*]] = getelementptr inbounds i8* %[[vbptr]], i32 %[[vbase_offs]]
//
// CHECK: [[skip]]
// CHECK:   %[[new_base:.*]] = phi i8* [ %[[base]], %{{.*}} ], [ %[[base_adj]], %[[vadjust]] ]
// CHECK:   %[[offset:.*]] = getelementptr inbounds i8* %[[new_base]], i32 %[[memptr0]]
// CHECK:   %[[v11:.*]] = bitcast i8* %[[offset]] to i32*
// CHECK:   %[[v12:.*]] = load i32* %[[v11]]
// CHECK:   ret i32 %[[v12]]
// CHECK: }
}

void callMemberPointerSingle(Single *o, void (Single::*memptr)()) {
  (o->*memptr)();
// Just look for an indirect thiscall.
// CHECK: define void @"\01?callMemberPointerSingle@@{{.*}} {{.*}} {
// CHECK:   call x86_thiscallcc void %{{.*}}(%{{.*}} %{{.*}})
// CHECK:   ret void
// CHECK: }
}

void callMemberPointerMultiple(Multiple *o, void (Multiple::*memptr)()) {
  (o->*memptr)();
// CHECK: define void @"\01?callMemberPointerMultiple@@{{.*}} {
// CHECK:   %[[memptr0:.*]] = extractvalue { i8*, i32 } %{{.*}}, 0
// CHECK:   %[[memptr1:.*]] = extractvalue { i8*, i32 } %{{.*}}, 1
// CHECK:   %[[this_adjusted:.*]] = getelementptr inbounds i8* %{{.*}}, i32 %[[memptr1]]
// CHECK:   %[[this:.*]] = bitcast i8* %[[this_adjusted]] to {{.*}}
// CHECK:   %[[fptr:.*]] = bitcast i8* %[[memptr0]] to {{.*}}
// CHECK:   call x86_thiscallcc void %[[fptr]](%{{.*}} %[[this]])
// CHECK:   ret void
// CHECK: }
}

void callMemberPointerVirtualBase(Virtual *o, void (Virtual::*memptr)()) {
  (o->*memptr)();
// This shares a lot with virtual data member pointers.
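// Conceptually (matching the IR below, names illustrative):
//   char *vbptr = (char *)o;
//   int vbase_offset = *(int *)(*(char **)vbptr + memptr.vbtable_offset);
//   void *adjusted_this = vbptr + vbase_offset + memptr.nonvirtual_adjustment;
//   ((void(__thiscall *)(void *))memptr.fn)(adjusted_this);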
// CHECK: define void @"\01?callMemberPointerVirtualBase@@{{.*}} {
// CHECK:   %[[memptr0:.*]] = extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK:   %[[memptr1:.*]] = extractvalue { i8*, i32, i32 } %{{.*}}, 1
// CHECK:   %[[memptr2:.*]] = extractvalue { i8*, i32, i32 } %{{.*}}, 2
// CHECK:   %[[vbptr:.*]] = getelementptr inbounds i8* %{{.*}}, i32 0
// CHECK:   %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i8**
// CHECK:   %[[vbtable:.*]] = load i8** %[[vbptr_a:.*]]
// CHECK:   %[[v7:.*]] = getelementptr inbounds i8* %[[vbtable]], i32 %[[memptr2]]
// CHECK:   %[[v8:.*]] = bitcast i8* %[[v7]] to i32*
// CHECK:   %[[vbase_offs:.*]] = load i32* %[[v8]]
// CHECK:   %[[v10:.*]] = getelementptr inbounds i8* %[[vbptr]], i32 %[[vbase_offs]]
// CHECK:   %[[this_adjusted:.*]] = getelementptr inbounds i8* %[[v10]], i32 %[[memptr1]]
// CHECK:   %[[fptr:.*]] = bitcast i8* %[[memptr0]] to void ({{.*}})
// CHECK:   %[[this:.*]] = bitcast i8* %[[this_adjusted]] to {{.*}}
// CHECK:   call x86_thiscallcc void %[[fptr]](%{{.*}} %[[this]])
// CHECK:   ret void
// CHECK: }
}

bool compareSingleFunctionMemptr(void (Single::*l)(), void (Single::*r)()) {
  return l == r;
// There should be only one comparison here, since the single-inheritance
// representation is just one function pointer.
// CHECK: define zeroext i1 @"\01?compareSingleFunctionMemptr@@YA_NP8Single@@AEXXZ0@Z"{{.*}} {
// CHECK-NOT: icmp
// CHECK:   %[[r:.*]] = icmp eq
// CHECK-NOT: icmp
// CHECK:   ret i1 %[[r]]
// CHECK: }
}

bool compareNeqSingleFunctionMemptr(void (Single::*l)(), void (Single::*r)()) {
  return l != r;
// There should be only one comparison here, since the single-inheritance
// representation is just one function pointer.
// CHECK: define zeroext i1 @"\01?compareNeqSingleFunctionMemptr@@YA_NP8Single@@AEXXZ0@Z"{{.*}} {
// CHECK-NOT: icmp
// CHECK:   %[[r:.*]] = icmp ne
// CHECK-NOT: icmp
// CHECK:   ret i1 %[[r]]
// CHECK: }
}

bool unspecFuncMemptrEq(void (Unspecified::*l)(), void (Unspecified::*r)()) {
  return l == r;
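// Conceptually (matching the IR below, names illustrative):
//   equal = (l.fn == r.fn) &&
//           ((l.adj == r.adj && l.vbptr_off == r.vbptr_off &&
//             l.vbtable_off == r.vbtable_off) || l.fn == null);
// i.e. when both function pointers are null, the adjustment fields are ignored.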
// CHECK: define zeroext i1 @"\01?unspecFuncMemptrEq@@YA_NP8Unspecified@@AEXXZ0@Z"{{.*}} {
// CHECK:   %[[lhs0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[l:.*]], 0
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r:.*]], 0
// CHECK:   %[[cmp0:.*]] = icmp eq i8* %[[lhs0]], %{{.*}}
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 1
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 1
// CHECK:   %[[cmp1:.*]] = icmp eq i32
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 2
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 2
// CHECK:   %[[cmp2:.*]] = icmp eq i32
// CHECK:   %[[res12:.*]] = and i1 %[[cmp1]], %[[cmp2]]
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 3
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 3
// CHECK:   %[[cmp3:.*]] = icmp eq i32
// CHECK:   %[[res123:.*]] = and i1 %[[res12]], %[[cmp3]]
// CHECK:   %[[iszero:.*]] = icmp eq i8* %[[lhs0]], null
// CHECK:   %[[bits_or_null:.*]] = or i1 %[[res123]], %[[iszero]]
// CHECK:   %{{.*}} = and i1 %[[bits_or_null]], %[[cmp0]]
// CHECK:   ret i1 %{{.*}}
// CHECK: }
}

bool unspecFuncMemptrNeq(void (Unspecified::*l)(), void (Unspecified::*r)()) {
  return l != r;
// CHECK: define zeroext i1 @"\01?unspecFuncMemptrNeq@@YA_NP8Unspecified@@AEXXZ0@Z"{{.*}} {
// CHECK:   %[[lhs0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[l:.*]], 0
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r:.*]], 0
// CHECK:   %[[cmp0:.*]] = icmp ne i8* %[[lhs0]], %{{.*}}
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 1
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 1
// CHECK:   %[[cmp1:.*]] = icmp ne i32
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 2
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 2
// CHECK:   %[[cmp2:.*]] = icmp ne i32
// CHECK:   %[[res12:.*]] = or i1 %[[cmp1]], %[[cmp2]]
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 3
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 3
// CHECK:   %[[cmp3:.*]] = icmp ne i32
// CHECK:   %[[res123:.*]] = or i1 %[[res12]], %[[cmp3]]
// CHECK:   %[[iszero:.*]] = icmp ne i8* %[[lhs0]], null
// CHECK:   %[[bits_or_null:.*]] = and i1 %[[res123]], %[[iszero]]
// CHECK:   %{{.*}} = or i1 %[[bits_or_null]], %[[cmp0]]
// CHECK:   ret i1 %{{.*}}
// CHECK: }
}

bool unspecDataMemptrEq(int Unspecified::*l, int Unspecified::*r) {
  return l == r;
// CHECK: define zeroext i1 @"\01?unspecDataMemptrEq@@YA_NPQUnspecified@@H0@Z"{{.*}} {
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 0
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 0
// CHECK:   icmp eq i32
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 1
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 1
// CHECK:   icmp eq i32
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 2
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 2
// CHECK:   icmp eq i32
// CHECK:   and i1
// CHECK:   and i1
// CHECK:   ret i1
// CHECK: }
}

void (Multiple::*convertB2FuncToMultiple(void (B2::*mp)()))() {
  return mp;
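// A null B2 member pointer has to stay null in the wider Multiple
// representation, so the conversion is guarded (conceptually, names
// illustrative):
//   result = (mp != null) ? (Multiple rep){ mp, /*B2 offset*/ 4 }
//                         : (Multiple rep){ null, 0 };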
// CHECK: define i64 @"\01?convertB2FuncToMultiple@@YAP8Multiple@@AEXXZP8B2@@AEXXZ@Z"{{.*}} {
// CHECK:   store
// CHECK:   %[[mp:.*]] = load i8** %{{.*}}, align 4
// CHECK:   icmp ne i8* %[[mp]], null
// CHECK:   br i1 %{{.*}} label %{{.*}}, label %{{.*}}
//
//        memptr.convert:                                   ; preds = %entry
// CHECK:   insertvalue { i8*, i32 } undef, i8* %[[mp]], 0
// CHECK:   insertvalue { i8*, i32 } %{{.*}}, i32 4, 1
// CHECK:   br label
//
//        memptr.converted:                                 ; preds = %memptr.convert, %entry
// CHECK:   phi { i8*, i32 } [ zeroinitializer, %{{.*}} ], [ {{.*}} ]
// CHECK: }
}

void (B2::*convertMultipleFuncToB2(void (Multiple::*mp)()))() {
// FIXME: cl emits warning C4407 on this code because of the representation
// change.  We might want to do the same.
  return static_cast<void (B2::*)()>(mp);
// FIXME: We should return i8* instead of i32 here.  The ptrtoint cast prevents
// LLVM from optimizing away the branch.  This is likely a bug in
// lib/CodeGen/TargetInfo.cpp with how we classify memptr types for returns.
//
// CHECK: define i32 @"\01?convertMultipleFuncToB2@@YAP8B2@@AEXXZP8Multiple@@AEXXZ@Z"{{.*}} {
// CHECK:   store
// CHECK:   %[[src:.*]] = load { i8*, i32 }* %{{.*}}, align 4
// CHECK:   extractvalue { i8*, i32 } %[[src]], 0
// CHECK:   icmp ne i8* %{{.*}}, null
// CHECK:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
//
//        memptr.convert:                                   ; preds = %entry
// CHECK:   %[[fp:.*]] = extractvalue { i8*, i32 } %[[src]], 0
// CHECK:   br label
//
//        memptr.converted:                                 ; preds = %memptr.convert, %entry
// CHECK:   phi i8* [ null, %{{.*}} ], [ %[[fp]], %{{.*}} ]
// CHECK: }
}

namespace Test1 {

struct A { int a; };
struct B { int b; };
struct C : virtual A { int c; };
struct D : B, C { int d; };

void (D::*convertCToD(void (C::*mp)()))() {
  return mp;
// CHECK: define void @"\01?convertCToD@Test1@@YAP8D@1@AEXXZP8C@1@AEXXZ@Z"{{.*}} {
// CHECK:   store
// CHECK:   load { i8*, i32, i32 }* %{{.*}}, align 4
// CHECK:   extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK:   icmp ne i8* %{{.*}}, null
// CHECK:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
//
//        memptr.convert:                                   ; preds = %entry
// CHECK:   extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK:   extractvalue { i8*, i32, i32 } %{{.*}}, 1
// CHECK:   extractvalue { i8*, i32, i32 } %{{.*}}, 2
// CHECK:   %[[adj:.*]] = add nsw i32 %{{.*}}, 4
// CHECK:   insertvalue { i8*, i32, i32 } undef, i8* {{.*}}, 0
// CHECK:   insertvalue { i8*, i32, i32 } {{.*}}, i32 %[[adj]], 1
// CHECK:   insertvalue { i8*, i32, i32 } {{.*}}, i32 {{.*}}, 2
// CHECK:   br label
//
//        memptr.converted:                                 ; preds = %memptr.convert, %entry
// CHECK:   phi { i8*, i32, i32 } [ { i8* null, i32 0, i32 -1 }, {{.*}} ], [ {{.*}} ]
// CHECK: }
}

}

namespace Test2 {
// Test that we dynamically convert between different null reps.

struct A { int a; };
struct B : A { int b; };
struct C : A {
  int c;
  virtual void hasVfPtr();
};

int A::*reinterpret(int B::*mp) {
  return reinterpret_cast<int A::*>(mp);
// CHECK: define i32 @"\01?reinterpret@Test2@@YAPQA@1@HPQB@1@H@Z"{{.*}}  {
// CHECK-NOT: select
// CHECK:   ret i32
// CHECK: }
}

int A::*reinterpret(int C::*mp) {
  return reinterpret_cast<int A::*>(mp);
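// C is polymorphic, so its null data member pointer is 0, while A's is -1.  A
// null input therefore has to be re-encoded (conceptually: mp != 0 ? mp : -1),
// which is the select checked for below.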
// CHECK: define i32 @"\01?reinterpret@Test2@@YAPQA@1@HPQC@1@H@Z"{{.*}}  {
// CHECK:   %[[mp:.*]] = load i32*
// CHECK:   %[[cmp:.*]] = icmp ne i32 %[[mp]], 0
// CHECK:   select i1 %[[cmp]], i32 %[[mp]], i32 -1
// CHECK: }
}

}