CodeGenTypes.cpp revision 9048891ff983d0681c116c6e8f1073aa31bdd6e8
//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {
  /// RecordOrganizer - This helper class, used by CGRecordLayout, lays out
  /// structs and unions. It manages transient information used during layout.
  /// FIXME: Handle field alignments. Handle packed structs.
  class RecordOrganizer {
  public:
    explicit RecordOrganizer(CodeGenTypes &Types, const RecordDecl& Record) :
      CGT(Types), RD(Record), STy(NULL) {}

    /// layoutStructFields - Do the actual work and lay out all fields. Create
    /// corresponding llvm struct type.  This should be invoked only after
    /// all fields are added.
    void layoutStructFields(const ASTRecordLayout &RL);

    /// layoutUnionFields - Do the actual work and lay out all fields. Create
    /// corresponding llvm struct type.  This should be invoked only after
    /// all fields are added.
    void layoutUnionFields(const ASTRecordLayout &RL);

    /// getLLVMType - Return associated llvm struct type. This may be NULL
    /// if fields are not laid out.
    llvm::Type *getLLVMType() const {
      return STy;
    }

    /// getPaddingFields - Return the set of llvm struct element indices that
    /// hold padding rather than a declared field.
    llvm::SmallSet<unsigned, 8> &getPaddingFields() {
      return PaddingFields;
    }

  private:
    CodeGenTypes &CGT;           // Type converter used to lower field types.
    const RecordDecl& RD;        // The record being laid out.
    llvm::Type *STy;             // Resulting llvm type; NULL until layout runs.
    llvm::SmallSet<unsigned, 8> PaddingFields;
  };
}

CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
                           const llvm::TargetData &TD)
  : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD) {
}

CodeGenTypes::~CodeGenTypes() {
  // CGRecordLayouts owns its values (allocated with new in
  // ConvertTagDeclType), so delete each one before clearing the map.
  for(llvm::DenseMap<const TagDecl *, CGRecordLayout *>::iterator
        I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
      I != E; ++I)
    delete I->second;
  CGRecordLayouts.clear();
}

/// ConvertType - Convert the specified type to its LLVM form.
77const llvm::Type *CodeGenTypes::ConvertType(QualType T) { 78 llvm::PATypeHolder Result = ConvertTypeRecursive(T); 79 80 // Any pointers that were converted defered evaluation of their pointee type, 81 // creating an opaque type instead. This is in order to avoid problems with 82 // circular types. Loop through all these defered pointees, if any, and 83 // resolve them now. 84 while (!PointersToResolve.empty()) { 85 std::pair<const PointerLikeType *, llvm::OpaqueType*> P = 86 PointersToResolve.back(); 87 PointersToResolve.pop_back(); 88 // We can handle bare pointers here because we know that the only pointers 89 // to the Opaque type are P.second and from other types. Refining the 90 // opqaue type away will invalidate P.second, but we don't mind :). 91 const llvm::Type *NT = ConvertTypeRecursive(P.first->getPointeeType()); 92 P.second->refineAbstractTypeTo(NT); 93 } 94 95 return Result; 96} 97 98const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) { 99 T = Context.getCanonicalType(T);; 100 101 // See if type is already cached. 102 llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator 103 I = TypeCache.find(T.getTypePtr()); 104 // If type is found in map and this is not a definition for a opaque 105 // place holder type then use it. Otherwise, convert type T. 106 if (I != TypeCache.end()) 107 return I->second.get(); 108 109 const llvm::Type *ResultType = ConvertNewType(T); 110 TypeCache.insert(std::make_pair(T.getTypePtr(), 111 llvm::PATypeHolder(ResultType))); 112 return ResultType; 113} 114 115/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from 116/// ConvertType in that it is used to convert to the memory representation for 117/// a type. For example, the scalar representation for _Bool is i1, but the 118/// memory representation is usually i8 or i32, depending on the target. 
const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  const llvm::Type *R = ConvertType(T);

  // If this is a non-bool type, don't map it.
  if (R != llvm::Type::Int1Ty)
    return R;

  // Otherwise, return an integer of the target-specified size.
  // (_Bool is i1 as a scalar value but occupies a full target-sized
  // integer when stored in memory.)
  return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));

}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If we never made an opaque placeholder for this tag, there is nothing
  // to refine.
  llvm::DenseMap<const TagDecl*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(TD);
  if (TDTI == TagDeclTypes.end()) return;

  // Remember the opaque LLVM type for this tagdecl.
  llvm::PATypeHolder OpaqueHolder = TDTI->second;
  assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
         "Updating compilation of an already non-opaque type?");

  // Remove it from TagDeclTypes so that it will be regenerated.
  // (ConvertTagDeclType would otherwise return the cached opaque type.)
  TagDeclTypes.erase(TDTI);

  // Generate the new type.
  const llvm::Type *NT = ConvertTagDeclType(TD);

  // Refine the old opaque type to its new definition.  The holder keeps the
  // opaque type alive across the conversion above.
  cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
}

/// Produces a vector containing the all of the instance variables in an
/// Objective-C object, in the order that they appear. Used to create LLVM
/// structures corresponding to Objective-C objects.
void CodeGenTypes::CollectObjCIvarTypes(ObjCInterfaceDecl *ObjCClass,
                                        std::vector<const llvm::Type*> &IvarTypes) {
  // Superclass ivars come first so the layout matches the inheritance chain.
  ObjCInterfaceDecl *SuperClass = ObjCClass->getSuperClass();
  if (SuperClass)
    CollectObjCIvarTypes(SuperClass, IvarTypes);
  for (ObjCInterfaceDecl::ivar_iterator I = ObjCClass->ivar_begin(),
       E = ObjCClass->ivar_end(); I != E; ++I) {
    IvarTypes.push_back(ConvertType((*I)->getType()));
    // Record the struct element index for this ivar for later field lookups.
    ObjCIvarInfo[*I] = IvarTypes.size() - 1;
  }
}

/// ConvertReturnType - Convert T for use as a function return type, mapping
/// C 'void' to LLVM's void type.
const llvm::Type *CodeGenTypes::ConvertReturnType(QualType T) {
  if (T->isVoidType())
    return llvm::Type::VoidTy;    // Result of function uses llvm void.
  else
    return ConvertType(T);
}

/// getTypeForFormat - Map an APFloat semantics description onto the
/// corresponding LLVM floating-point type.
static const llvm::Type* getTypeForFormat(const llvm::fltSemantics &format) {
  if (&format == &llvm::APFloat::IEEEsingle)
    return llvm::Type::FloatTy;
  if (&format == &llvm::APFloat::IEEEdouble)
    return llvm::Type::DoubleTy;
  if (&format == &llvm::APFloat::IEEEquad)
    return llvm::Type::FP128Ty;
  if (&format == &llvm::APFloat::PPCDoubleDouble)
    return llvm::Type::PPC_FP128Ty;
  if (&format == &llvm::APFloat::x87DoubleExtended)
    return llvm::Type::X86_FP80Ty;
  assert(0 && "Unknown float format!");
  return 0;
}

/// ConvertNewType - Convert a canonical type that was not found in TypeCache.
/// Called only from ConvertTypeRecursive on a cache miss.
const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
  const clang::Type &Ty = *Context.getCanonicalType(T);

  switch (Ty.getTypeClass()) {
  case Type::TypeName:        // typedef isn't canonical.
  case Type::TypeOfExp:       // typeof isn't canonical.
  case Type::TypeOfTyp:       // typeof isn't canonical.
    assert(0 && "Non-canonical type, shouldn't happen");
  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty).getKind()) {
    default: assert(0 && "Unknown builtin type!");
    case BuiltinType::Void:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      return llvm::IntegerType::get(8);

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      return llvm::Type::Int1Ty;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar:
      // All integer types map to an iN of the target-specified width.
      return llvm::IntegerType::get(
        static_cast<unsigned>(Context.getTypeSize(T)));

    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      return getTypeForFormat(Context.getFloatTypeSemantics(T));
    }
    break;
  }
  case Type::Complex: {
    // Complex numbers lower to a two-element struct {elt, elt}.
    const llvm::Type *EltTy =
      ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
    return llvm::StructType::get(EltTy, EltTy, NULL);
  }
  case Type::Reference:
  case Type::Pointer: {
    // Defer conversion of the pointee: produce a pointer to an opaque type
    // and queue it on PointersToResolve so ConvertType can refine it later.
    // This avoids infinite recursion on circular types.
    const PointerLikeType &PTy = cast<PointerLikeType>(Ty);
    QualType ETy = PTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(&PTy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }

  case Type::VariableArray: {
    const VariableArrayType &A = cast<VariableArrayType>(Ty);
    assert(A.getIndexTypeQualifier() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    return ConvertTypeRecursive(A.getElementType());
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
    assert(A.getIndexTypeQualifier() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int]
    return llvm::ArrayType::get(ConvertTypeRecursive(A.getElementType()), 0);
  }
  case Type::ConstantArray: {
    const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
    const llvm::Type *EltTy = ConvertTypeRecursive(A.getElementType());
    return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType &VT = cast<VectorType>(Ty);
    return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
                                 VT.getNumElements());
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    const FunctionType &FP = cast<FunctionType>(Ty);
    const llvm::Type *ResultType;

    if (FP.getResultType()->isVoidType())
      ResultType = llvm::Type::VoidTy;    // Result of function uses llvm void.
    else
      ResultType = ConvertTypeRecursive(FP.getResultType());

    // FIXME: Convert argument types.
    bool isVarArg;
    std::vector<const llvm::Type*> ArgTys;

    // Struct return passes the struct byref: an aggregate result becomes a
    // hidden pointer argument and the llvm return type becomes void.
    if (!ResultType->isSingleValueType() && ResultType != llvm::Type::VoidTy) {
      ArgTys.push_back(llvm::PointerType::get(ResultType,
                                        FP.getResultType().getAddressSpace()));
      ResultType = llvm::Type::VoidTy;
    }

    if (const FunctionTypeProto *FTP = dyn_cast<FunctionTypeProto>(&FP)) {
      DecodeArgumentTypes(*FTP, ArgTys);
      isVarArg = FTP->isVariadic();
    } else {
      // K&R-style declaration with no prototype: treat as variadic.
      isVarArg = true;
    }

    return llvm::FunctionType::get(ResultType, ArgTys, isVarArg);
  }

  case Type::ASQual:
    // Address-space qualifiers don't change the lowered type.
    return
      ConvertTypeRecursive(QualType(cast<ASQualType>(Ty).getBaseType(), 0));

  case Type::ObjCInterface: {
    // FIXME: This comment is broken. Either the code should check for
    // the flag it is referring to or it should do the right thing in
    // the presence of it.

    // Warning: Use of this is strongly discouraged.  Late binding of instance
    // variables is supported on some runtimes and so using static binding can
    // break code when libraries are updated.  Only use this if you have
    // previously checked that the ObjCRuntime subclass in use does not support
    // late-bound ivars.
    ObjCInterfaceType OIT = cast<ObjCInterfaceType>(Ty);
    std::vector<const llvm::Type*> IvarTypes;
    CollectObjCIvarTypes(OIT.getDecl(), IvarTypes);
    llvm::Type *T = llvm::StructType::get(IvarTypes);
    TheModule.addTypeName(std::string("struct.") + OIT.getDecl()->getName(), T);
    return T;
  }

  case Type::ObjCQualifiedInterface: {
    // Lower as the underlying interface type; protocols are type-check only.
    ObjCQualifiedInterfaceType QIT = cast<ObjCQualifiedInterfaceType>(Ty);

    return ConvertTypeRecursive(Context.getObjCInterfaceType(QIT.getDecl()));
  }

  case Type::ObjCQualifiedId:
    // Protocols don't influence the LLVM type.
    return ConvertTypeRecursive(Context.getObjCIdType());

  case Type::Tagged: {
    const TagDecl *TD = cast<TagType>(Ty).getDecl();
    const llvm::Type *Res = ConvertTagDeclType(TD);

    // Build a name like "struct.foo" for the module's symbol table.
    std::string TypeName(TD->getKindName());
    TypeName += '.';

    // Name the codegen type after the typedef name
    // if there is no tag type name available
    if (TD->getIdentifier())
      TypeName += TD->getName();
    else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
      TypeName += TdT->getDecl()->getName();
    else
      TypeName += "anon";

    TheModule.addTypeName(TypeName, Res);
    return Res;
  }

  case Type::BlockPointer: {
    assert(0 && "FIXME: Cannot get type of block pointer.");
  }
  }

  // FIXME: implement.
  return llvm::OpaqueType::get();
}

/// DecodeArgumentTypes - Append the lowered llvm types for FTP's arguments to
/// ArgTys, passing non-scalar arguments indirectly (byval pointer).
void CodeGenTypes::DecodeArgumentTypes(const FunctionTypeProto &FTP,
                                       std::vector<const llvm::Type*> &ArgTys) {
  for (unsigned i = 0, e = FTP.getNumArgs(); i != e; ++i) {
    const llvm::Type *Ty = ConvertTypeRecursive(FTP.getArgType(i));
    if (Ty->isSingleValueType())
      ArgTys.push_back(Ty);
    else
      // byval arguments are always on the stack, which is addr space #0.
      ArgTys.push_back(llvm::PointerType::getUnqual(Ty));
  }
}

/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
/// enum.
const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
  llvm::DenseMap<const TagDecl*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(TD);

  // If we've already compiled this tag type, use the previous definition.
  if (TDTI != TagDeclTypes.end())
    return TDTI->second;

  // If this is still a forward definition, just define an opaque type to use
  // for this tagged decl.
  if (!TD->isDefinition()) {
    llvm::Type *ResultType = llvm::OpaqueType::get();
    TagDeclTypes.insert(std::make_pair(TD, ResultType));
    return ResultType;
  }

  // Okay, this is a definition of a type.  Compile the implementation now.

  if (TD->isEnum()) {
    // Don't bother storing enums in TagDeclTypes.
    return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
  }

  // This decl could well be recursive.  In this case, insert an opaque
  // definition of this type, which the recursive uses will get.  We will then
  // refine this opaque version later.

  // Create new OpaqueType now for later use in case this is a recursive
  // type.  This will later be refined to the actual type.
  llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get();
  TagDeclTypes.insert(std::make_pair(TD, ResultHolder));

  const llvm::Type *ResultType;
  const RecordDecl *RD = cast<const RecordDecl>(TD);
  if (TD->isStruct() || TD->isClass()) {
    // Layout fields.
    RecordOrganizer RO(*this, *RD);

    RO.layoutStructFields(Context.getASTRecordLayout(RD));

    // Get llvm::StructType.
    CGRecordLayouts[TD] = new CGRecordLayout(RO.getLLVMType(),
                                             RO.getPaddingFields());
    ResultType = RO.getLLVMType();

  } else if (TD->isUnion()) {
    // Just use the largest element of the union, breaking ties with the
    // highest aligned member.
    if (RD->getNumMembers() != 0) {
      RecordOrganizer RO(*this, *RD);

      RO.layoutUnionFields(Context.getASTRecordLayout(RD));

      // Get llvm::StructType.
      CGRecordLayouts[TD] = new CGRecordLayout(RO.getLLVMType(),
                                               RO.getPaddingFields());
      ResultType = RO.getLLVMType();
    } else {
      // Empty union: lower to an empty struct.
      ResultType = llvm::StructType::get(std::vector<const llvm::Type*>());
    }
  } else {
    assert(0 && "FIXME: Unknown tag decl kind!");
  }

  // Refine our Opaque type to ResultType.  This can invalidate ResultType, so
  // make sure to read the result out of the holder.
  cast<llvm::OpaqueType>(ResultHolder.get())
    ->refineAbstractTypeTo(ResultType);

  return ResultHolder.get();
}

/// getLLVMFieldNo - Return llvm::StructType element number
/// that corresponds to the field FD.
453unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) { 454 llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD); 455 assert (I != FieldInfo.end() && "Unable to find field info"); 456 return I->second; 457} 458 459unsigned CodeGenTypes::getLLVMFieldNo(const ObjCIvarDecl *OID) { 460 llvm::DenseMap<const ObjCIvarDecl*, unsigned>::iterator 461 I = ObjCIvarInfo.find(OID); 462 assert(I != ObjCIvarInfo.end() && "Unable to find field info"); 463 return I->second; 464} 465 466/// addFieldInfo - Assign field number to field FD. 467void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) { 468 FieldInfo[FD] = No; 469} 470 471/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD. 472CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) { 473 llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator 474 I = BitFields.find(FD); 475 assert (I != BitFields.end() && "Unable to find bitfield info"); 476 return I->second; 477} 478 479/// addBitFieldInfo - Assign a start bit and a size to field FD. 480void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned Begin, 481 unsigned Size) { 482 BitFields.insert(std::make_pair(FD, BitFieldInfo(Begin, Size))); 483} 484 485/// getCGRecordLayout - Return record layout info for the given llvm::Type. 486const CGRecordLayout * 487CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const { 488 llvm::DenseMap<const TagDecl*, CGRecordLayout *>::iterator I 489 = CGRecordLayouts.find(TD); 490 assert (I != CGRecordLayouts.end() 491 && "Unable to find record layout information for type"); 492 return I->second; 493} 494 495/// layoutStructFields - Do the actual work and lay out all fields. Create 496/// corresponding llvm struct type. 497/// Note that this doesn't actually try to do struct layout; it depends on 498/// the layout built by the AST. (We have to do struct layout to do Sema, 499/// and there's no point to duplicating the work.) 
void RecordOrganizer::layoutStructFields(const ASTRecordLayout &RL) {
  // FIXME: This code currently always generates packed structures.
  // Unpacked structures are more readable, and sometimes more efficient!
  // (But note that any changes here are likely to impact CGExprConstant,
  // which makes some messy assumptions.)
  uint64_t llvmSize = 0;   // Bits emitted into LLVMFields so far.
  // FIXME: Make this a SmallVector
  std::vector<const llvm::Type*> LLVMFields;
  int NumMembers = RD.getNumMembers();

  for (int curField = 0; curField < NumMembers; curField++) {
    const FieldDecl *FD = RD.getMember(curField);
    // AST-computed bit offset of this field within the record.
    uint64_t offset = RL.getFieldOffset(curField);
    const llvm::Type *Ty = CGT.ConvertTypeRecursive(FD->getType());
    uint64_t size = CGT.getTargetData().getABITypeSizeInBits(Ty);

    if (FD->isBitField()) {
      // The bit width must be an integer constant expression.
      Expr *BitWidth = FD->getBitWidth();
      llvm::APSInt FieldSize(32);
      bool isBitField =
        BitWidth->isIntegerConstantExpr(FieldSize, CGT.getContext());
      assert (isBitField && "Invalid BitField size expression");
      uint64_t BitFieldSize = FieldSize.getZExtValue();

      // Bitfield field info is different from other field info;
      // it actually ignores the underlying LLVM struct because
      // there isn't any convenient mapping.  Store the index of the
      // size-aligned unit containing the bitfield and its bit offset
      // within that unit.
      CGT.addFieldInfo(FD, offset / size);
      CGT.addBitFieldInfo(FD, offset % size, BitFieldSize);
    } else {
      // Put the element into the struct. This would be simpler
      // if we didn't bother, but it seems a bit too strange to
      // allocate all structs as i8 arrays.
      // Pad with i8 units until we reach the field's offset (struct is
      // packed, so padding must be explicit).
      while (llvmSize < offset) {
        LLVMFields.push_back(llvm::Type::Int8Ty);
        llvmSize += 8;
      }

      llvmSize += size;
      CGT.addFieldInfo(FD, LLVMFields.size());
      LLVMFields.push_back(Ty);
    }
  }

  // Tail padding: fill out to the AST-computed record size.
  while (llvmSize < RL.getSize()) {
    LLVMFields.push_back(llvm::Type::Int8Ty);
    llvmSize += 8;
  }

  // Packed struct (true) so the explicit padding above is authoritative.
  STy = llvm::StructType::get(LLVMFields, true);
  assert(CGT.getTargetData().getABITypeSizeInBits(STy) == RL.getSize());
}

/// layoutUnionFields - Do the actual work and lay out all fields. Create
/// corresponding llvm struct type. This should be invoked only after
/// all fields are added.
void RecordOrganizer::layoutUnionFields(const ASTRecordLayout &RL) {
  for (int curField = 0; curField < RD.getNumMembers(); curField++) {
    const FieldDecl *FD = RD.getMember(curField);
    // The offset should usually be zero, but bitfields could be strange
    uint64_t offset = RL.getFieldOffset(curField);

    if (FD->isBitField()) {
      Expr *BitWidth = FD->getBitWidth();
      uint64_t BitFieldSize =
        BitWidth->getIntegerConstantExprValue(CGT.getContext()).getZExtValue();

      // All union members live at struct element 0.
      CGT.addFieldInfo(FD, 0);
      CGT.addBitFieldInfo(FD, offset, BitFieldSize);
    } else {
      CGT.addFieldInfo(FD, 0);
    }
  }

  // This looks stupid, but it is correct in the sense that
  // it works no matter how complicated the sizes and alignments
  // of the union elements are. The natural alignment
  // of the result doesn't matter because anyone allocating
  // structures should be aligning them appropriately anyway.
  // FIXME: We can be a bit more intuitive in a lot of cases.
  STy = llvm::ArrayType::get(llvm::Type::Int8Ty, RL.getSize() / 8);
  assert(CGT.getTargetData().getABITypeSizeInBits(STy) == RL.getSize());
}