//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Holds a field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about the bit fields.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }
  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
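  /// For example (a sketch; the exact IR depends on the target), given
  ///
  /// union U { int i; char c[8]; double d; };
  ///
  /// the member with the strictest alignment (and, among those, the largest
  /// size) provides the storage type, so the LLVM type would be
  /// %union.U = type { double }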
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - Tries to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - Lay out a single base, virtual or non-virtual.
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - Lay out a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - Lay out the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - Lay out the virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - Lay out a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - Lay out the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - Lay out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - Lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the previous
  /// base needs to be changed to an array of i8. Returns true if the last
  /// laid out base was resized.
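  /// For example (an Itanium ABI sketch): given
  ///
  /// struct A { int i; char c; };   // nvsize 5, but { i32, i8 } allocates 8
  /// struct B : A { char d; };      // d is placed at offset 5
  ///
  /// B's field d lands in A's tail padding, so the base field for A must be
  /// resized to [5 x i8] before d can be appended.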
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to a data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
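  // For example (a sketch): with '#pragma pack(1)', a record such as
  // 'struct S { char c; int i; };' has alignment 1, so the i32 field cannot
  // be placed at its natural 4-byte alignment; the first pass fails and the
  // second pass emits the packed LLVM struct <{ i8, i32 }>.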
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions, so
  // reverse the offset. The byte offsets are reversed back later.
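  // For example (a sketch): in a 32-bit container, a field occupying bits
  // [4, 12) counted from the little end occupies bits [20, 28) counted from
  // the big end: 32 - 4 - 8 == 20.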
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce the size to the next
  // smaller power of two and retry. The current algorithm assumes pow2 sized
  // types, although this is easy to fix.
  //
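  // For illustration (a hypothetical layout): with a 24-bit packed container
  // and a bit-field of a 32-bit type occupying bits [10, 19), the initial
  // 32-bit access would run past the record, so it is narrowed to 16 bits.
  // That access covers target bits [0, 6); a second, 8-bit access at bit 16
  // covers the remaining target bits [6, 9).
  //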
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the same
  // structure to be coalesced, and that the backend should be smart enough to
  // narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
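  //
  // For example (a sketch): on a target with 64-bit registers, an initial
  // 16-bit access is widened to 64 bits here; if the record itself is only
  // two bytes, the loop below narrows the access back down to 16 bits.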
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset because the first
    // fields are in higher bits. But this also reverses the bytes, so fix
    // this here by reversing the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

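// A bit-field that starts inside bytes already appended for a previous
// bit-field reuses those bytes. For example (a sketch):
//
//   struct S { char a : 4; char b : 4; };
//
// Laying out 'a' appends one i8 and leaves four bits available; 'b' then fits
// entirely within that i8, so no further bytes are appended for it.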
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                               Types.getContext().getTargetInfo().getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                         CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  AppendField(baseOffset, subobjectType);
  return true;
}

bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  NonVirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  VirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
        E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}

/// LayoutVirtualBases - Lay out the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}

bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
               ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
               : RD->isDynamicClass()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = (*Field);
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases.  The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
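  // For example (a sketch): if NextFieldOffset is 5 and the next field wants
  // offset 8 with 4-byte alignment, rounding 5 up to 8 already yields the
  // right offset; the non-packed LLVM struct's natural alignment provides the
  // padding, so no explicit i8 padding array is needed.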
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
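///
/// For example (Itanium C++ ABI), a null pointer to data member is
/// represented as -1 rather than 0, so a record such as
///
///   struct S { int S::*mp; };
///
/// cannot be zero-initialized with an LLVM zeroinitializer.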
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOpts().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}