CodeGenTypes.cpp revision 3a2b657088de9413714a51bff153a59565adb3ef
//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
                           const llvm::TargetData &TD, const ABIInfo &Info,
                           CGCXXABI &CXXABI, const CodeGenOptions &CGO)
  : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
    TheABIInfo(Info), TheCXXABI(CXXABI), CodeGenOpts(CGO) {
  // Start outside of any struct/pointer recursion, with no deferred layouts.
  RecursionState = RS_Normal;
  SkippedLayout = false;
}

CodeGenTypes::~CodeGenTypes() {
  // We own the CGRecordLayout objects stored in the map; free them all.
  for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
         I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
       I != E; ++I)
    delete I->second;

  // FoldingSet nodes are deleted one at a time; advance the iterator *before*
  // deleting the node it points at, since deletion invalidates it.
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

/// addRecordTypeName - Compute a name for the IR struct type corresponding to
/// the record decl RD and assign it to Ty.  The name has the form
/// "<kind>.<qualified-name><suffix>", e.g. "struct.std::pair.base", falling
/// back to "anon" when neither a tag name nor a typedef name is available.
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     llvm::StringRef suffix) {
  llvm::SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // Name the codegen type after the typedef name
  // if there is no tag type name available
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      OS << RD->getQualifiedNameAsString();
    else
      RD->printName(OS);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      OS << TDD->getQualifiedNameAsString();
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type.  For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T){
  llvm::Type *R = ConvertType(T);

  // If this is a non-bool type, don't map it.
  if (!R->isIntegerTy(1))
    return R;

  // Otherwise, return an integer of the target-specified size.
  return llvm::IntegerType::get(getLLVMContext(),
                                (unsigned)Context.getTypeSize(T));
}

/// isFuncTypeArgumentConvertible - Return true if the specified type in a
/// function argument or result position can be converted to an IR type at this
/// point.  This boils down to being whether it is complete, as well as whether
/// we've temporarily deferred expanding the type because we're in a recursive
/// context.
bool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty){
  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (TT == 0) return true;

  // If it's a tagged type, but is a forward decl, we can't convert it.
  if (!TT->getDecl()->isDefinition())
    return false;

  // If we're not under a pointer under a struct, then we can convert it if
  // needed.
  if (RecursionState != RS_StructPointer)
    return true;

  // If this is an enum, then it is safe to convert.
  const RecordType *RT = dyn_cast<RecordType>(TT);
  if (RT == 0) return true;

  // Otherwise, we have to be careful.  If it is a struct that we're in the
  // process of expanding, then we can't convert the function type.  That's ok
  // though because we must be in a pointer context under the struct, so we can
  // just convert it to a dummy type.
  //
  // We decide this by checking whether ConvertRecordDeclType returns us an
  // opaque type for a struct that we know is defined.
  return !ConvertRecordDeclType(RT->getDecl())->isOpaque();
}


/// Code to verify a given function type is complete, i.e. the return type
/// and all of the argument types are complete.  Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended.  If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncTypeArgumentConvertible(FT->getResultType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
      if (!isFuncTypeArgumentConvertible(FPT->getArgType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types from
  // the cache.  This allows function types and other things that may be derived
  // from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl()))
      TypeCache.clear();
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already.  If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);
}

/// getTypeForFormat - Map a clang float semantics description onto the
/// corresponding LLVM IR floating-point type.
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format) {
  if (&format == &llvm::APFloat::IEEEsingle)
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble)
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad)
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble)
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended)
    return llvm::Type::getX86_FP80Ty(VMContext);
  assert(0 && "Unknown float format!");
  return 0;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  // See if type is already cached.
  llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
  // If type is found in map then use it. Otherwise, convert type T.
  if (TCI != TypeCache.end())
    return TCI->second;

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = 0;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");
    break;

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.  Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      // All plain integer builtins map to an iN of the target-specified width.
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T));
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

    case BuiltinType::Overload:
    case BuiltinType::Dependent:
    case BuiltinType::BoundMember:
    case BuiltinType::UnknownAny:
      llvm_unreachable("Unexpected placeholder builtin type!");
      break;
    }
    break;
  }
  case Type::Complex: {
    // A complex type is lowered as a two-element struct {elt, elt}.
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    // References lower to pointers; note that we're now under a pointer, so
    // nested struct conversions may be deferred (see RecursionStatePointerRAII).
    RecursionStatePointerRAII X(RecursionState);
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }
  case Type::Pointer: {
    RecursionStatePointerRAII X(RecursionState);
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    // IR has no 'void*'; use 'i8*' for pointers to void.
    if (PointeeType->isVoidTy())
      PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized.  If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    const llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(Ty);
    ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
                                       VT->getNumElements());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // First, check whether we can build the full function type.  If the
    // function type depends on an incomplete type (e.g. a struct or enum), we
    // cannot lower the function type.
    if (RecursionState == RS_StructPointer ||
        !isFuncTypeConvertible(cast<FunctionType>(Ty))) {
      // This function's type depends on an incomplete tag type.
      // Return a placeholder type.
      ResultType = llvm::StructType::get(getLLVMContext());

      SkippedLayout |= RecursionState == RS_StructPointer;
      break;
    }

    // While we're converting the argument types for a function, we don't want
    // to recursively convert any pointed-to structs.  Converting directly-used
    // structs is ok though.
    RecursionStateTy SavedRecursionState = RecursionState;
    RecursionState = RS_Struct;

    // The function type can be built; call the appropriate routines to
    // build it.
    const CGFunctionInfo *FI;
    bool isVariadic;
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(Ty)) {
      FI = &getFunctionInfo(
                   CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
      isVariadic = FPT->isVariadic();
    } else {
      // Functions without a prototype are lowered as variadic.
      const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(Ty);
      FI = &getFunctionInfo(
                 CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
      isVariadic = true;
    }

    ResultType = GetFunctionType(*FI, isVariadic);

    // Restore our recursion state.
    RecursionState = SavedRecursionState;

    // If a placeholder was used anywhere, derived cached types may be stale;
    // drop the whole cache (conservative).
    if (SkippedLayout)
      TypeCache.clear();

    // Once we're back at the outermost level, flush any record conversions
    // that were deferred while we were inside a struct/pointer context.
    if (RecursionState == RS_Normal)
      while (!DeferredRecords.empty())
        ConvertRecordDeclType(DeferredRecords.pop_back_val());
    break;
  }

  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::createNamed(getLLVMContext(), "");
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer: {
    RecursionStatePointerRAII X(RecursionState);
    // Protocol qualifications do not influence the LLVM type, we just return a
    // pointer to the underlying interface type.  We don't need to worry about
    // recursive conversion.
    const llvm::Type *T =
      ConvertType(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    ResultType = T->getPointerTo();
    break;
  }

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    // A defined (or fixed-underlying-type) enum lowers to its integer type.
    if (ED->isDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder '{}' type.
    ResultType = llvm::StructType::get(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    RecursionStatePointerRAII X(RecursionState);
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(FTy);
    unsigned AS = Context.getTargetAddressSpace(FTy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::MemberPointer: {
    // Member pointer representation is ABI-specific; delegate to the C++ ABI.
    ResultType =
      getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (Entry == 0) {
    Entry = llvm::StructType::createNamed(getLLVMContext(), "");
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  if (!RD->isDefinition() || !Ty->isOpaque())
    return Ty;

  // If we're recursively nested inside the conversion of a pointer inside the
  // struct, defer conversion.
  if (RecursionState == RS_StructPointer) {
    DeferredRecords.push_back(RD);
    return Ty;
  }

  // Okay, this is a definition of a type.  Compile the implementation now.
  RecursionStateTy SavedRecursionState = RecursionState;
  RecursionState = RS_Struct;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CRD->bases_begin(),
         e = CRD->bases_end(); i != e; ++i) {
      if (!i->isVirtual()) {
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
        ConvertRecordDeclType(Base);
      }
    }
  }

  // Layout fields.
  CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = Layout;

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();


  // Restore our recursion state.  If we're done converting the outer-most
  // record, then convert any deferred structs as well.
  RecursionState = SavedRecursionState;

  if (RecursionState == RS_Normal)
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
  if (!Layout) {
    // Compute the type information.  Converting the record type populates
    // CGRecordLayouts as a side effect.
    ConvertRecordDeclType(RD);

    // Now try again.
    Layout = CGRecordLayouts.lookup(Key);
  }

  assert(Layout && "Unable to find record layout information for type");
  return *Layout;
}

/// isZeroInitializable - Return whether the given type can be
/// zero-initialized (i.e. an all-zero bit pattern is a valid value),
/// considering records' subobjects and the ABI's member-pointer
/// representation.
bool CodeGenTypes::isZeroInitializable(QualType T) {
  // No need to check for member pointers when not compiling C++.
  if (!Context.getLangOptions().CPlusPlus)
    return true;

  T = Context.getBaseElementType(T);

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

/// isZeroInitializable - Return whether the record decl's computed layout
/// says it can be zero-initialized.
bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}