//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;
  /// Fields - Holds a field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to
  /// avoid laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// Layout a single base, virtual or non-virtual.
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - layout a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - layout the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the previous
  /// base needs to be changed to an array of i8. Returns true if the last
  /// laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}
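
// Illustrative example (not part of the original source): given
//
//   #pragma pack(1)
//   struct S { char c; int i; };
//
// the first LayoutFields pass fails because 'i' would land at offset 1,
// below the natural alignment of i32, so Layout resets its state and
// retries with Packed = true, yielding <{ i8, i32 }>.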

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  assert(ContainingTypeAlign && "Expected alignment to be specified");

  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions,
  // so reverse the offset. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
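  // As a hand-worked illustration (not from the original source): for a
  // 'short' bit-field with FieldOffset = 2, FieldSize = 15 and
  // ContainingTypeSizeInBits = 32 (with no register-size widening), the
  // first access uses AccessWidth = 16 and AccessStart = 0, covering bits
  // [0, 16) and yielding 14 target bits; the second access covers [16, 32)
  // and yields the final bit, for two access components in total.
  //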
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the same
  // structure to be coalesced, and that the backend should be smart enough to
  // narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset above, because the
    // first fields are in higher bits. But this also reverses the bytes, so
    // fix it here by reversing the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}
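
// Illustrative trace (not from the original source), assuming 8-bit chars:
// for
//
//   struct S { char a : 3; char b : 3; };
//
// laying out 'a' appends one i8 and leaves BitsAvailableInLastField = 5;
// laying out 'b' (fieldOffset = 3) then takes the first branch, computes
// zero bytes to append, and the two bit-fields share the single i8, with
// BitsAvailableInLastField = 2 afterwards.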

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                               Types.getContext().getTargetInfo().getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                         CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
          "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}
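
// Illustrative example (not from the original source): for
//
//   union U { char c; int i; };
//
// the loop above picks i32 as the storage type since it has both the
// larger size and the stricter alignment, so U lowers to { i32 } with no
// tail padding.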

bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  AppendField(baseOffset, subobjectType);
  return true;
}

bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  NonVirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  VirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
        E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}

/// LayoutVirtualBases - layout the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
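    // For example (illustrative, not from the original source), given
    //
    //   struct A { virtual void f(); };
    //   struct B : virtual A { };
    //   struct C : B { };
    //
    // A is the primary base of B, so the A subobject is already emitted as
    // part of B's layout and must not be laid out again here.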
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}

bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Layout.hasOwnVFPtr()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
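      // For example (illustrative, not from the original source), in
      //
      //   struct __attribute__((ms_struct)) S { int a; int : 0; int b; };
      //
      // the zero-length bit-field follows the non-bit-field 'a' and is
      // skipped entirely.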
      const FieldDecl *FD = *Field;
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases.  The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}
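
// Illustrative example (not from the original source): with the non-POD
// classes
//
//   struct A { A(); int i; char c; };  // nvsize 5, sizeof 8
//   struct B : A { char d; };          // d goes into A's tail padding
//
// the A subobject in B is initially appended as %struct.A, occupying
// bytes [0, 8); before 'd' can be placed at offset 5, that field is
// popped and replaced with [5 x i8].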

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
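/// For example (illustrative, not from the original source): under the
/// Itanium C++ ABI a null pointer to data member is represented as -1, so
/// a record containing an 'int S::*' member cannot be zero-initialized
/// with an LLVM zeroinitializer.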
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOpts().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}