CGRecordLayoutBuilder.cpp revision cb0511cc291a2f1b6ba281b818f3d07184e198b7
//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace clang {
namespace CodeGen {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// NonVirtualBaseFieldTypes - Holds the LLVM types for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  std::vector<const llvm::Type *> NonVirtualBaseFieldTypes;

  /// NonVirtualBaseTypeIsSameAsCompleteType - Whether the non-virtual part of
  /// the struct is equivalent to the complete struct.
  bool NonVirtualBaseTypeIsSameAsCompleteType;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
  /// LLVM types.
  unsigned AlignmentAsLLVMStruct;

  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  /// this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  void ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  const llvm::Type *getByteArrayType(uint64_t NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);
  void CheckZeroInitializable(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : NonVirtualBaseTypeIsSameAsCompleteType(false), IsZeroInitializable(true),
      Packed(false), Types(Types), Alignment(0), AlignmentAsLLVMStruct(1),
      BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}
}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct.
  // Try again with a packed struct.
  Packed = true;
  AlignmentAsLLVMStruct = 1;
  NextFieldOffsetInBytes = 0;
  FieldTypes.clear();
  LLVMFields.clear();
  LLVMBitFields.clear();
  LLVMNonVirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always
  // access at aligned indices of that type. If such an access would fail
  // because it extends past the bound of the type, then we reduce size to the
  // next smaller power of two and retry. The current algorithm assumes pow2
  // sized types, although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
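    //
    // For example (illustrative values only, not from any particular record):
    // with FieldOffset = 10, FieldSize = 5, AccessStart = 8 and
    // AccessWidth = 8, the access reads bits [8, 16), and its intersection
    // with the field's bits [10, 15) yields a 5-bit component starting at
    // bit 10.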
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = RL.getSize();
  unsigned ContainingTypeAlign = RL.getAlignment();

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
                                                 FieldSize)));

  AppendBytes(NumBytesToAppend);

  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}

const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    unsigned NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    if (NumBytesToAppend > 1)
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);

    // Add the bit field info.
    LLVMBitFields.push_back(
      LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
                                                       0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
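  // Every union member is recorded with LLVM field number 0, since all union
  // members start at offset zero of the record.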
  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
  return Types.ConvertTypeForMemRecursive(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                                                 uint64_t BaseOffset) {
  const ASTRecordLayout &Layout =
    Types.getContext().getASTRecordLayout(BaseDecl);

  uint64_t NonVirtualSize = Layout.getNonVirtualSize();

  if (BaseDecl->isEmpty()) {
    // FIXME: Lay out empty bases.
    return;
  }

  CheckZeroInitializable(BaseDecl);

  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
  AppendPadding(BaseOffset / 8, 1);

  // Append the base field.
  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));

  AppendBytes(NonVirtualSize / 8);
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // FIXME: Handle a virtual primary base.
      if (!Layout.getPrimaryBaseWasVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
    }
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
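    // (A non-virtual primary base was placed at offset 0 above, so it is
    // skipped here.)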
    if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffsetInBits(BaseDecl));
  }
}

void
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  uint64_t AlignedNonVirtualTypeSize =
    llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                             Layout.getNonVirtualAlign()) / 8;

  // First check if we can use the same fields as for the complete class.
  if (AlignedNonVirtualTypeSize == Layout.getSize() / 8) {
    NonVirtualBaseTypeIsSameAsCompleteType = true;
    return;
  }

  NonVirtualBaseFieldTypes = FieldTypes;

  // Check if we need padding.
  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  assert(AlignedNextFieldOffset <= AlignedNonVirtualTypeSize &&
         "Size mismatch!");

  if (AlignedNonVirtualTypeSize == AlignedNextFieldOffset) {
    // We don't need any padding.
    return;
  }

  uint64_t NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
  NonVirtualBaseFieldTypes.push_back(getByteArrayType(NumBytes));
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  // We've laid out the non-virtual bases and the fields, now compute the
  // non-virtual base field types.
  if (RD)
    ComputeNonVirtualBaseType(RD);

  // FIXME: Lay out the virtual bases instead of just treating them as tail
  // padding.

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
                                        const llvm::Type *FieldTy) {
  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
                                   getTypeAlignment(FieldTy));

  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);

  FieldTypes.push_back(FieldTy);

  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                          unsigned FieldAlignment) {
  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }
}

const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(uint64_t NumBytes) {
  assert(NumBytes != 0 && "Empty byte arrays aren't allowed.");

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
  if (NumBytes == 0)
    return;

  // Append the padding field.
  AppendField(NextFieldOffsetInBytes, getByteArrayType(NumBytes));
}

unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return 1;

  return Types.getTargetData().getABITypeAlignment(Ty);
}

void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializable)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  T = Types.getContext().getBaseElementType(T);

  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = false;
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    CheckZeroInitializable(RD);
  }
}

void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
  // This record already contains a member pointer.
  if (!IsZeroInitializable)
    return;

  // FIXME: It would be better if there was a way to explicitly compute the
  // record layout instead of converting to a type.
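  // Converting the type here ensures the CGRecordLayout for RD has been
  // computed before it is queried below.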
  Types.ConvertTagDeclType(RD);

  const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);

  if (!Layout.isZeroInitializable())
    IsZeroInitializable = false;
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);

  const llvm::Type *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    if (Builder.NonVirtualBaseTypeIsSameAsCompleteType)
      BaseTy = Ty;
    else if (!Builder.NonVirtualBaseFieldTypes.empty())
      BaseTy = llvm::StructType::get(getLLVMContext(),
                                     Builder.NonVirtualBaseFieldTypes,
                                     Builder.Packed);
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = Layout.getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    uint64_t AlignedNonVirtualTypeSizeInBits =
      llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                               Layout.getNonVirtualAlign());

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *LLVMType << "\n";
  if (BaseLLVMType)
    OS << " BaseLLVMType:" << *BaseLLVMType << "\n";
  OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}