CodeGenTypes.cpp revision c8f01ebbce3c874b43ee78535f7d179517f5f436
//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
                           const llvm::TargetData &TD, const ABIInfo &Info,
                           CGCXXABI &CXXABI)
  : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
    TheABIInfo(Info), TheCXXABI(CXXABI) {
}

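// The destructor releases the objects this CodeGenTypes instance owns: the
// cached CGRecordLayout objects and the CGFunctionInfo objects interned in
// FunctionInfos.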
CodeGenTypes::~CodeGenTypes() {
  for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
         I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
      I != E; ++I)
    delete I->second;

  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
/// pointers that are referenced but have not been converted yet.  This is used
/// to handle cyclic structures properly.
void CodeGenTypes::HandleLateResolvedPointers() {
  assert(!PointersToResolve.empty() && "No pointers to resolve!");

  // Any pointers that were converted deferred evaluation of their pointee type,
  // creating an opaque type instead.  This is in order to avoid problems with
  // circular types.  Loop through all these deferred pointees, if any, and
  // resolve them now.
  while (!PointersToResolve.empty()) {
    std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();

    // We can handle bare pointers here because we know that the only pointers
    // to the Opaque type are P.second and from other types.  Refining the
    // opaque type away will invalidate P.second, but we don't mind :).
    const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
    P.second->refineAbstractTypeTo(NT);
  }
}


/// ConvertType - Convert the specified type to its LLVM form.
const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
  const llvm::Type *Result = ConvertTypeRecursive(T);

  // If this is a top-level call to ConvertType and sub-conversions caused
  // pointers to get lazily built as opaque types, resolve the pointers, which
  // might cause Result to be merged away.
  if (!IsRecursive && !PointersToResolve.empty()) {
    llvm::PATypeHolder ResultHandle = Result;
    HandleLateResolvedPointers();
    Result = ResultHandle;
  }
  return Result;
}

const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
  T = Context.getCanonicalType(T);

  // See if the type is already cached.
  llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
    I = TypeCache.find(T.getTypePtr());
  // If the type is found in the map and this is not a definition for an opaque
  // placeholder type, then use it.  Otherwise, convert type T.
  if (I != TypeCache.end())
    return I->second.get();

  const llvm::Type *ResultType = ConvertNewType(T);
  TypeCache.insert(std::make_pair(T.getTypePtr(),
                                  llvm::PATypeHolder(ResultType)));
  return ResultType;
}

/// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type.  For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
  const llvm::Type *R = ConvertType(T, IsRecursive);

  // If this is a non-bool type, don't map it.
  if (!R->isIntegerTy(1))
    return R;

  // Otherwise, return an integer of the target-specified size.
  return llvm::IntegerType::get(getLLVMContext(),
                                (unsigned)Context.getTypeSize(T));
}

/// VerifyFuncTypeComplete - Check that a given function type is complete,
/// i.e. that the return type and all of the argument types are complete.
/// Returns the first incomplete tag type found, or null if the function type
/// is complete.
const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) {
  const FunctionType *FT = cast<FunctionType>(T);
  if (const TagType* TT = FT->getResultType()->getAs<TagType>())
    if (!TT->getDecl()->isDefinition())
      return TT;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
    for (unsigned i = 0; i < FPT->getNumArgs(); i++)
      if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>())
        if (!TT->getDecl()->isDefinition())
          return TT;
  return 0;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  const Type *Key = Context.getTagDeclType(TD).getTypePtr();
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(Key);
  if (TDTI == TagDeclTypes.end()) return;

  // Remember the opaque LLVM type for this tagdecl.
  llvm::PATypeHolder OpaqueHolder = TDTI->second;
  assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
         "Updating compilation of an already non-opaque type?");

  // Remove it from TagDeclTypes so that it will be regenerated.
  TagDeclTypes.erase(TDTI);

  // Generate the new type.
  const llvm::Type *NT = ConvertTagDeclType(TD);

  // Refine the old opaque type to its new definition.
  cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);

  // Since we just completed a tag type, check to see if any function types
  // were completed along with the tag type.
  // FIXME: This is very inefficient; if we tracked which function types depend
  // on which tag types, this could be made reasonably efficient.
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
  for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
    if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
      // This function type still depends on an incomplete tag type; make sure
      // that tag type has an associated opaque type.
      ConvertTagDeclType(TT->getDecl());
    } else {
      // This function no longer depends on an incomplete tag type; create the
      // function type, and refine the opaque type to the new function type.
      llvm::PATypeHolder OpaqueHolder = i->second;
      const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
      cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
      FunctionTypes.erase(i);
    }
  }
}

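/// getTypeForFormat - Map a floating-point semantics descriptor from the AST
/// context to the corresponding LLVM floating-point type.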
static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
                                          const llvm::fltSemantics &format) {
  if (&format == &llvm::APFloat::IEEEsingle)
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble)
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad)
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble)
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended)
    return llvm::Type::getX86_FP80Ty(VMContext);
  assert(0 && "Unknown float format!");
  return 0;
}

const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
  const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();

  switch (Ty.getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    assert(false && "Non-canonical or dependent types aren't possible.");
    break;

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty).getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.  Just
      // map to the same as char.
      return llvm::Type::getInt8Ty(getLLVMContext());

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      return llvm::Type::getInt1Ty(getLLVMContext());

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return llvm::IntegerType::get(getLLVMContext(),
        static_cast<unsigned>(Context.getTypeSize(T)));

    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      return getTypeForFormat(getLLVMContext(),
                              Context.getFloatTypeSemantics(T));

    case BuiltinType::NullPtr: {
      // Model std::nullptr_t as i8*.
      const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
      return llvm::PointerType::getUnqual(Ty);
    }

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      return llvm::IntegerType::get(getLLVMContext(), 128);

    case BuiltinType::Overload:
    case BuiltinType::Dependent:
    case BuiltinType::UndeducedAuto:
      assert(0 && "Unexpected builtin type!");
      break;
    }
    assert(0 && "Unknown builtin type!");
    break;
  }
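  // Complex numbers are lowered to a two-element struct of the element type,
  // holding the real part followed by the imaginary part.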
  case Type::Complex: {
    const llvm::Type *EltTy =
      ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
    return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
  }
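  // References and pointers defer conversion of their pointee: an opaque
  // placeholder is recorded in PointersToResolve and refined later by
  // HandleLateResolvedPointers, so cyclic types don't cause infinite recursion.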
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType &RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }
  case Type::Pointer: {
    const PointerType &PTy = cast<PointerType>(Ty);
    QualType ETy = PTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }

  case Type::VariableArray: {
    const VariableArrayType &A = cast<VariableArrayType>(Ty);
    assert(A.getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    return ConvertTypeForMemRecursive(A.getElementType());
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
    assert(A.getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int]
    return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
                                0);
  }
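  // Constant-size arrays lower to LLVM arrays of the same length, using the
  // memory representation of the element type: int X[40] -> [40 x int].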
  case Type::ConstantArray: {
    const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
    const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
    return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType &VT = cast<VectorType>(Ty);
    return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
                                 VT.getNumElements());
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // First, check whether we can build the full function type.  If the
    // function type depends on an incomplete type (e.g. a struct or enum), we
    // cannot lower the function type.  Instead, turn it into an opaque type
    // and have UpdateCompletedType revisit the function type when/if the opaque
    // argument type is defined.
    if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) {
      // This function's type depends on an incomplete tag type; make sure
      // we have an opaque type corresponding to the tag type.
      ConvertTagDeclType(TT->getDecl());
      // Create an opaque type for this function type, save it, and return it.
      llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
      FunctionTypes.insert(std::make_pair(&Ty, ResultType));
      return ResultType;
    }

    // The function type can be built; call the appropriate routines to
    // build it.
    const CGFunctionInfo *FI;
    bool isVariadic;
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) {
      FI = &getFunctionInfo(
                   CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)),
                            true /*Recursive*/);
      isVariadic = FPT->isVariadic();
    } else {
      const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
      FI = &getFunctionInfo(
                CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)),
                            true /*Recursive*/);
      isVariadic = true;
    }

    return GetFunctionType(*FI, isVariadic, true);
  }

  case Type::ObjCObject:
    return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType());

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
    if (!T)
      T = llvm::OpaqueType::get(getLLVMContext());
    return T;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type; we just return a
    // pointer to the underlying interface type.  We don't need to worry about
    // recursive conversion.
    const llvm::Type *T =
      ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
    return llvm::PointerType::getUnqual(T);
  }

  case Type::Record:
  case Type::Enum: {
    const TagDecl *TD = cast<TagType>(Ty).getDecl();
    const llvm::Type *Res = ConvertTagDeclType(TD);

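    // Compute a readable name for the converted type and register it with the
    // module so the emitted IR is easier to follow.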
    llvm::SmallString<256> TypeName;
    llvm::raw_svector_ostream OS(TypeName);
    OS << TD->getKindName() << '.';

    // Name the codegen type after the typedef name
    // if there is no tag type name available.
    if (TD->getIdentifier()) {
      // FIXME: We should not have to check for a null decl context here.
      // Right now we do it because the implicit Obj-C decls don't have one.
      if (TD->getDeclContext())
        OS << TD->getQualifiedNameAsString();
      else
        TD->printName(OS);
    } else if (const TypedefType *TdT = dyn_cast<TypedefType>(T)) {
      // FIXME: We should not have to check for a null decl context here.
      // Right now we do it because the implicit Obj-C decls don't have one.
      if (TdT->getDecl()->getDeclContext())
        OS << TdT->getDecl()->getQualifiedNameAsString();
      else
        TdT->getDecl()->printName(OS);
    } else {
      OS << "anon";
    }

    TheModule.addTypeName(OS.str(), Res);
    return Res;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
    PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
    return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
  }

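  // Member pointer lowering is ABI-specific (a member pointer is not
  // necessarily a single pointer-sized value), so delegate to the C++ ABI.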
  case Type::MemberPointer: {
    return getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(&Ty));
  }
  }

  // FIXME: implement.
  return llvm::OpaqueType::get(getLLVMContext());
}

/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
/// enum.
const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl as the key.
  const Type *Key =
    Context.getTagDeclType(TD).getTypePtr();
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(Key);

  // If we've already compiled this tag type, use the previous definition.
  if (TDTI != TagDeclTypes.end())
    return TDTI->second;

  const EnumDecl *ED = dyn_cast<EnumDecl>(TD);

  // If this is still a forward declaration, just define an opaque
  // type to use for this tagged decl.
  // C++0x: If this is an enumeration type with a fixed underlying type,
  // consider it complete.
  if (!TD->isDefinition() && !(ED && ED->isFixed())) {
    llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
    TagDeclTypes.insert(std::make_pair(Key, ResultType));
    return ResultType;
  }

  // Okay, this is a definition of a type.  Compile the implementation now.

  if (ED)  // Don't bother storing enums in TagDeclTypes.
    return ConvertTypeRecursive(ED->getIntegerType());

  // This decl could well be recursive.  In this case, insert an opaque
  // definition of this type, which the recursive uses will get.  We will then
  // refine this opaque version later.

  // Create a new OpaqueType now for later use in case this is a recursive
  // type.  This will later be refined to the actual type.
  llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
  TagDeclTypes.insert(std::make_pair(Key, ResultHolder));

  const RecordDecl *RD = cast<const RecordDecl>(TD);

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
         e = RD->bases_end(); i != e; ++i) {
      if (!i->isVirtual()) {
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
        ConvertTagDeclType(Base);
      }
    }
  }

  // Layout fields.
  CGRecordLayout *Layout = ComputeRecordLayout(RD);

  CGRecordLayouts[Key] = Layout;
  const llvm::Type *ResultType = Layout->getLLVMType();

  // Refine our Opaque type to ResultType.  This can invalidate ResultType, so
  // make sure to read the result out of the holder.
  cast<llvm::OpaqueType>(ResultHolder.get())
    ->refineAbstractTypeTo(ResultType);

  return ResultHolder.get();
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const {
  const Type *Key = Context.getTagDeclType(TD).getTypePtr();
  const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
  if (!Layout) {
    // Compute the type information.
    ConvertTagDeclType(TD);

    // Now try again.
    Layout = CGRecordLayouts.lookup(Key);
  }

  assert(Layout && "Unable to find record layout information for type");
  return *Layout;
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  // No need to check for member pointers when not compiling C++.
  if (!Context.getLangOptions().CPlusPlus)
    return true;

  T = Context.getBaseElementType(T);

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

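/// isZeroInitializable - Return whether zero-initialization is a valid
/// initialization for the given C++ record, based on its computed layout.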
bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
  // FIXME: It would be better if there were a way to explicitly compute the
  // record layout instead of converting to a type.
  ConvertTagDeclType(RD);

  const CGRecordLayout &Layout = getCGRecordLayout(RD);
  return Layout.isZeroInitializable();
}
