CGRecordLayoutBuilder.cpp revision 68cf1a5a01ba43ed56a8624632fd65e0804430ac
1//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Builder implementation for CGRecordLayout objects.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGRecordLayout.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/CXXInheritance.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/Expr.h"
20#include "clang/AST/RecordLayout.h"
21#include "CodeGenTypes.h"
22#include "CGCXXABI.h"
23#include "llvm/DerivedTypes.h"
24#include "llvm/Type.h"
25#include "llvm/Support/Debug.h"
26#include "llvm/Support/raw_ostream.h"
27#include "llvm/Target/TargetData.h"
28using namespace clang;
29using namespace CodeGen;
30
namespace {

/// CGRecordLayoutBuilder - Helper that translates the AST layout of a
/// RecordDecl into the LLVM struct type plus the side tables (field numbers,
/// bit-field access info, non-virtual-base field numbers) that IR generation
/// consumes via CGRecordLayout.
class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// NonVirtualBaseFieldTypes - Holds the LLVM types for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  std::vector<const llvm::Type *> NonVirtualBaseFieldTypes;

  /// NonVirtualBaseTypeIsSameAsCompleteType - Whether the non-virtual part of
  /// the struct is equivalent to the complete struct.
  bool NonVirtualBaseTypeIsSameAsCompleteType;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  /// LLVMBaseInfo - Pairs a non-virtual base class with the LLVM field number
  /// its storage occupies in FieldTypes.
  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl, in bytes.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  /// this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// Layout a single base, virtual or non-virtual
  void LayoutBase(const CXXRecordDecl *BaseDecl, uint64_t BaseOffset);

  /// LayoutVirtualBase - layout a single virtual base.
  void LayoutVirtualBase(const CXXRecordDecl *BaseDecl, uint64_t BaseOffset);

  /// LayoutVirtualBases - layout the virtual bases of a record decl.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes,
                     unsigned FieldAlignmentInBytes);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  const llvm::Type *getByteArrayType(uint64_t NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  /// getTypeAlignment - Returns the ABI alignment of Ty in bytes, or 1 when
  /// building a packed struct.
  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  unsigned getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);
  void CheckZeroInitializable(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : NonVirtualBaseTypeIsSameAsCompleteType(false), IsZeroInitializable(true),
    Packed(false), Types(Types), Alignment(0), BitsAvailableInLastField(0),
    NextFieldOffsetInBytes(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}
178
179void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
180  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
181  Packed = D->hasAttr<PackedAttr>();
182
183  if (D->isUnion()) {
184    LayoutUnion(D);
185    return;
186  }
187
188  if (LayoutFields(D))
189    return;
190
191  // We weren't able to layout the struct. Try again with a packed struct
192  Packed = true;
193  NextFieldOffsetInBytes = 0;
194  FieldTypes.clear();
195  LLVMFields.clear();
196  LLVMBitFields.clear();
197  LLVMNonVirtualBases.clear();
198
199  LayoutFields(D);
200}
201
/// MakeInfo - Compute the sequence of memory accesses needed to read/write the
/// bit-field FD, which occupies FieldSize bits starting at bit FieldOffset of
/// a containing object whose total size and alignment (both in bits) are also
/// given.
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                               const FieldDecl *FD,
                               uint64_t FieldOffset,
                               uint64_t FieldSize,
                               uint64_t ContainingTypeSizeInBits,
                               unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  // Accumulate accesses until every bit of the field is covered.
  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    // Move past this access and keep going until the whole field is covered.
    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
296
297CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
298                                        const FieldDecl *FD,
299                                        uint64_t FieldOffset,
300                                        uint64_t FieldSize) {
301  const RecordDecl *RD = FD->getParent();
302  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
303  uint64_t ContainingTypeSizeInBits = RL.getSize();
304  unsigned ContainingTypeAlign = RL.getAlignment();
305
306  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
307                  ContainingTypeAlign);
308}
309
/// LayoutBitField - Lay out the bit-field D at bit offset FieldOffset,
/// appending enough byte storage to FieldTypes and recording the bit-field's
/// access info in LLVMBitFields.
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  // Zero-width bit-fields occupy no storage.
  if (FieldSize == 0)
    return;

  // Current end of the laid-out struct, in bits.
  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(FieldOffset / 8, 1);

    // The bit-field starts on a fresh byte; round its width up to whole bytes.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
                                                 FieldSize)));

  AppendBytes(NumBytesToAppend);

  // Remember how many bits of the last appended byte remain unused, so a
  // following bit-field can pack into them.
  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}
350
/// LayoutField - Lay out the single field D at bit offset FieldOffset.
/// Returns false when the field cannot be placed without building a packed
/// struct (the caller then retries the whole layout with Packed set).
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  // Track whether this field's type prevents zeroinitializer-based init.
  CheckZeroInitializable(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger then the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  // A record field whose max-field-alignment attribute disagrees with the
  // LLVM type's alignment also forces a packed struct.
  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  // If natural alignment would push the field past its AST offset, only a
  // packed struct can place it correctly.
  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(FieldOffsetInBytes, TypeAlignment);

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}
408
409const llvm::Type *
410CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
411                                        const ASTRecordLayout &Layout) {
412  if (Field->isBitField()) {
413    uint64_t FieldSize =
414      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
415
416    // Ignore zero sized bit fields.
417    if (FieldSize == 0)
418      return 0;
419
420    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
421    unsigned NumBytesToAppend =
422      llvm::RoundUpToAlignment(FieldSize, 8) / 8;
423
424    if (NumBytesToAppend > 1)
425      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
426
427    // Add the bit field info.
428    LLVMBitFields.push_back(
429      LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
430                                                       0, FieldSize)));
431    return FieldTy;
432  }
433
434  // This is a regular union field.
435  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
436  return Types.ConvertTypeForMemRecursive(Field->getType());
437}
438
/// LayoutUnion - Lay out a union by picking the single member whose type best
/// represents it (largest alignment, then largest size) and padding to the
/// union's full size.
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  // Best candidate so far: its type, size in bytes, and alignment in bytes.
  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
          "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    // Null means a zero-width bit-field; it contributes nothing.
    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    // Keep the member with the strictest alignment; break alignment ties in
    // favor of the larger size.
    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}
495
496void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *BaseDecl,
497                                       uint64_t BaseOffset) {
498  CheckZeroInitializable(BaseDecl);
499
500  const ASTRecordLayout &Layout =
501    Types.getContext().getASTRecordLayout(BaseDecl);
502
503  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
504
505  AppendPadding(BaseOffset / 8, 1);
506
507  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
508  AppendBytes(NonVirtualSize.getQuantity());
509}
510
511void
512CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *BaseDecl,
513                                         uint64_t BaseOffset) {
514  // Ignore empty bases.
515  if (BaseDecl->isEmpty())
516    return;
517
518  CheckZeroInitializable(BaseDecl);
519
520  const ASTRecordLayout &Layout =
521    Types.getContext().getASTRecordLayout(BaseDecl);
522
523  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
524
525  AppendPadding(BaseOffset / 8, 1);
526
527  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
528  AppendBytes(NonVirtualSize.getQuantity());
529
530  // FIXME: Add the vbase field info.
531}
532
/// LayoutVirtualBases - layout the virtual bases of a record decl, walking
/// the base hierarchy recursively and skipping virtual bases that serve as
/// an indirect primary base (they are laid out with their containing class).
void
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      uint64_t VBaseOffset = Layout.getVBaseClassOffsetInBits(BaseDecl);
      LayoutVirtualBase(BaseDecl, VBaseOffset);
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    // Recurse to pick up virtual bases further down the hierarchy. Note that
    // the offsets always come from the most-derived class's Layout.
    LayoutVirtualBases(BaseDecl, Layout);
  }
}
561
562void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
563                                                 uint64_t BaseOffset) {
564  // Ignore empty bases.
565  if (BaseDecl->isEmpty())
566    return;
567
568  LayoutBase(BaseDecl, BaseOffset);
569
570  // Append the base field.
571  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size() - 1));
572}
573
/// LayoutNonVirtualBases - Lay out the vtable pointer (if any) and every
/// direct non-virtual base of RD, honoring the AST layout's choice of
/// primary base.
void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      // No primary base to share a vtable pointer with: synthesize one of
      // type i32 (...)** at offset 0.
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // The primary base (which carries the vtable pointer) is laid out at
      // offset 0, as a virtual or non-virtual base as appropriate.
      if (!Layout.isPrimaryBaseVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
      else
        LayoutVirtualBase(PrimaryBase, 0);
    }
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffsetInBits(BaseDecl));
  }
}
614
/// ComputeNonVirtualBaseType - Build NonVirtualBaseFieldTypes, the field list
/// for the "base" variant of the struct (everything except virtual bases).
/// Returns false when the current fields can't fit in the non-virtual size
/// without packing.
bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);


  CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  uint64_t AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign).getQuantity();


  // First check if we can use the same fields as for the complete class.
  if (AlignedNonVirtualTypeSize == Layout.getSize() / 8) {
    NonVirtualBaseTypeIsSameAsCompleteType = true;
    return true;
  }

  // Check if we need padding.
  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
                             getAlignmentAsLLVMStruct());

  // The fields laid out so far already extend past the non-virtual size.
  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize)
    return false; // Needs packing.

  NonVirtualBaseFieldTypes = FieldTypes;

  if (AlignedNonVirtualTypeSize == AlignedNextFieldOffset) {
    // We don't need any padding.
    return true;
  }

  // Pad the base type out to the aligned non-virtual size with a byte array.
  uint64_t NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
  NonVirtualBaseFieldTypes.push_back(getByteArrayType(NumBytes));
  return true;
}
651
/// LayoutFields - Lay out the record's bases and fields in order: non-virtual
/// bases, then fields, then the non-virtual base type, then virtual bases,
/// then tail padding. Returns false when any step requires a packed struct.
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  // Only C++ records have bases.
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // And lay out the virtual bases. Indirect primary bases are collected
    // first so LayoutVirtualBases can skip them.
    RD->getIndirectPrimaryBases(IndirectPrimaryBases);
    if (Layout.isPrimaryBaseVirtual())
      IndirectPrimaryBases.insert(Layout.getPrimaryBase());
    LayoutVirtualBases(RD, Layout);
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}
693
694void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
695  assert(RecordSize % 8 == 0 && "Invalid record size!");
696
697  uint64_t RecordSizeInBytes = RecordSize / 8;
698  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
699
700  uint64_t AlignedNextFieldOffset =
701    llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
702                             getAlignmentAsLLVMStruct());
703
704  if (AlignedNextFieldOffset == RecordSizeInBytes) {
705    // We don't need any padding.
706    return;
707  }
708
709  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
710  AppendBytes(NumPadBytes);
711}
712
713void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
714                                        const llvm::Type *FieldTy) {
715  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);
716
717  FieldTypes.push_back(FieldTy);
718
719  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
720  BitsAvailableInLastField = 0;
721}
722
723void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
724                                          unsigned FieldAlignmentInBytes) {
725  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
726         "Incorrect field layout!");
727
728  // Round up the field offset to the alignment of the field type.
729  uint64_t AlignedNextFieldOffsetInBytes =
730    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignmentInBytes);
731
732  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
733    // Even with alignment, the field offset is not at the right place,
734    // insert padding.
735    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
736
737    AppendBytes(PaddingInBytes);
738  }
739}
740
741const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(uint64_t NumBytes) {
742  assert(NumBytes != 0 && "Empty byte array's aren't allowed.");
743
744  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
745  if (NumBytes > 1)
746    Ty = llvm::ArrayType::get(Ty, NumBytes);
747
748  return Ty;
749}
750
751void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
752  if (NumBytes == 0)
753    return;
754
755  // Append the padding field
756  AppendField(NextFieldOffsetInBytes, getByteArrayType(NumBytes));
757}
758
759unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
760  if (Packed)
761    return 1;
762
763  return Types.getTargetData().getABITypeAlignment(Ty);
764}
765
766unsigned CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
767  if (Packed)
768    return 1;
769
770  unsigned MaxAlignment = 1;
771  for (size_t i = 0; i != FieldTypes.size(); ++i)
772    MaxAlignment = std::max(MaxAlignment, getTypeAlignment(FieldTypes[i]));
773
774  return MaxAlignment;
775}
776
777void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
778  // This record already contains a member pointer.
779  if (!IsZeroInitializable)
780    return;
781
782  // Can only have member pointers if we're compiling C++.
783  if (!Types.getContext().getLangOptions().CPlusPlus)
784    return;
785
786  T = Types.getContext().getBaseElementType(T);
787
788  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
789    if (!Types.getCXXABI().isZeroInitializable(MPT))
790      IsZeroInitializable = false;
791  } else if (const RecordType *RT = T->getAs<RecordType>()) {
792    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
793    CheckZeroInitializable(RD);
794  }
795}
796
797void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
798  // This record already contains a member pointer.
799  if (!IsZeroInitializable)
800    return;
801
802  const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
803  if (!Layout.isZeroInitializable())
804    IsZeroInitializable = false;
805}
806
807CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
808  CGRecordLayoutBuilder Builder(*this);
809
810  Builder.Layout(D);
811
812  const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(),
813                                                     Builder.FieldTypes,
814                                                     Builder.Packed);
815
816  const llvm::StructType *BaseTy = 0;
817  if (isa<CXXRecordDecl>(D)) {
818    if (Builder.NonVirtualBaseTypeIsSameAsCompleteType)
819      BaseTy = Ty;
820    else if (!Builder.NonVirtualBaseFieldTypes.empty())
821      BaseTy = llvm::StructType::get(getLLVMContext(),
822                                     Builder.NonVirtualBaseFieldTypes,
823                                     Builder.Packed);
824  }
825
826  CGRecordLayout *RL =
827    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable);
828
829  // Add all the non-virtual base field numbers.
830  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
831                                  Builder.LLVMNonVirtualBases.end());
832
833  // Add all the field numbers.
834  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
835                       Builder.LLVMFields.end());
836
837  // Add bitfield info.
838  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
839                       Builder.LLVMBitFields.end());
840
841  // Dump the layout, if requested.
842  if (getContext().getLangOptions().DumpRecordLayouts) {
843    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
844    llvm::errs() << "Record: ";
845    D->dump();
846    llvm::errs() << "\nLayout: ";
847    RL->dump();
848  }
849
850#ifndef NDEBUG
851  // Verify that the computed LLVM struct size matches the AST layout size.
852  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
853
854  uint64_t TypeSizeInBits = Layout.getSize();
855  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
856         "Type size mismatch!");
857
858  if (BaseTy) {
859    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
860    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
861    CharUnits AlignedNonVirtualTypeSize =
862      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
863
864    uint64_t AlignedNonVirtualTypeSizeInBits =
865      AlignedNonVirtualTypeSize.getQuantity() * getContext().getCharWidth();
866
867    assert(AlignedNonVirtualTypeSizeInBits ==
868           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
869           "Type size mismatch!");
870  }
871
872  // Verify that the LLVM and AST field offsets agree.
873  const llvm::StructType *ST =
874    dyn_cast<llvm::StructType>(RL->getLLVMType());
875  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
876
877  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
878  RecordDecl::field_iterator it = D->field_begin();
879  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
880    const FieldDecl *FD = *it;
881
882    // For non-bit-fields, just check that the LLVM struct offset matches the
883    // AST offset.
884    if (!FD->isBitField()) {
885      unsigned FieldNo = RL->getLLVMFieldNo(FD);
886      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
887             "Invalid field offset!");
888      continue;
889    }
890
891    // Ignore unnamed bit-fields.
892    if (!FD->getDeclName())
893      continue;
894
895    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
896    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
897      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
898
899      // Verify that every component access is within the structure.
900      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
901      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
902      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
903             "Invalid bit-field access (out of range)!");
904    }
905  }
906#endif
907
908  return RL;
909}
910
911void CGRecordLayout::print(llvm::raw_ostream &OS) const {
912  OS << "<CGRecordLayout\n";
913  OS << "  LLVMType:" << *LLVMType << "\n";
914  if (NonVirtualBaseLLVMType)
915    OS << "  NonVirtualBaseLLVMType:" << *NonVirtualBaseLLVMType << "\n";
916  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
917  OS << "  BitFields:[\n";
918
919  // Print bit-field infos in declaration order.
920  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
921  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
922         it = BitFields.begin(), ie = BitFields.end();
923       it != ie; ++it) {
924    const RecordDecl *RD = it->first->getParent();
925    unsigned Index = 0;
926    for (RecordDecl::field_iterator
927           it2 = RD->field_begin(); *it2 != it->first; ++it2)
928      ++Index;
929    BFIs.push_back(std::make_pair(Index, &it->second));
930  }
931  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
932  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
933    OS.indent(4);
934    BFIs[i].second->print(OS);
935    OS << "\n";
936  }
937
938  OS << "]>\n";
939}
940
/// dump - Print this record layout to stderr; convenience wrapper around
/// print() for use from a debugger.
void CGRecordLayout::dump() const {
  print(llvm::errs());
}
944
945void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
946  OS << "<CGBitFieldInfo";
947  OS << " Size:" << Size;
948  OS << " IsSigned:" << IsSigned << "\n";
949
950  OS.indent(4 + strlen("<CGBitFieldInfo"));
951  OS << " NumComponents:" << getNumComponents();
952  OS << " Components: [";
953  if (getNumComponents()) {
954    OS << "\n";
955    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
956      const AccessInfo &AI = getComponent(i);
957      OS.indent(8);
958      OS << "<AccessInfo"
959         << " FieldIndex:" << AI.FieldIndex
960         << " FieldByteOffset:" << AI.FieldByteOffset
961         << " FieldBitStart:" << AI.FieldBitStart
962         << " AccessWidth:" << AI.AccessWidth << "\n";
963      OS.indent(8 + strlen("<AccessInfo"));
964      OS << " AccessAlignment:" << AI.AccessAlignment
965         << " TargetBitOffset:" << AI.TargetBitOffset
966         << " TargetBitWidth:" << AI.TargetBitWidth
967         << ">\n";
968    }
969    OS.indent(4);
970  }
971  OS << "]>";
972}
973
/// dump - Print this bit-field info to stderr; convenience wrapper around
/// print() for use from a debugger.
void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}
977