CGRecordLayoutBuilder.cpp revision 3d155e683a74d3783362ef1865be91544eb8a9fc
1//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Builder implementation for CGRecordLayout objects.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGRecordLayout.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/DeclCXX.h"
18#include "clang/AST/Expr.h"
19#include "clang/AST/RecordLayout.h"
20#include "CodeGenTypes.h"
21#include "CGCXXABI.h"
22#include "llvm/DerivedTypes.h"
23#include "llvm/Type.h"
24#include "llvm/Support/Debug.h"
25#include "llvm/Support/raw_ostream.h"
26#include "llvm/Target/TargetData.h"
27using namespace clang;
28using namespace CodeGen;
29
30namespace clang {
31namespace CodeGen {
32
class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// NonVirtualBaseFieldTypes - Holds the LLVM types for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  std::vector<const llvm::Type *> NonVirtualBaseFieldTypes;

  /// NonVirtualBaseTypeIsSameAsCompleteType - Whether the non-virtual part of
  /// the struct is equivalent to the complete struct.
  bool NonVirtualBaseTypeIsSameAsCompleteType;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  /// LLVMBaseInfo - Holds a non-virtual base and its corresponding LLVM field
  /// number.
  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  /// Types - The CodeGenTypes object this builder lays out records for.
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
  /// LLVM types.
  unsigned AlignmentAsLLVMStruct;

  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  /// this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Will layout a field in an union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutField - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  void ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  const llvm::Type *getByteArrayType(uint64_t NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  /// getTypeAlignment - Return the alignment (in bytes) to use for Ty;
  /// always 1 when building a packed struct.
  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);
  void CheckZeroInitializable(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : NonVirtualBaseTypeIsSameAsCompleteType(false), IsZeroInitializable(true),
      Packed(false), Types(Types), Alignment(0), AlignmentAsLLVMStruct(1),
      BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};
157
158}
159}
160
161void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
162  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
163  Packed = D->hasAttr<PackedAttr>();
164
165  if (D->isUnion()) {
166    LayoutUnion(D);
167    return;
168  }
169
170  if (LayoutFields(D))
171    return;
172
173  // We weren't able to layout the struct. Try again with a packed struct
174  Packed = true;
175  AlignmentAsLLVMStruct = 1;
176  NextFieldOffsetInBytes = 0;
177  FieldTypes.clear();
178  LLVMFields.clear();
179  LLVMBitFields.clear();
180  LLVMNonVirtualBases.clear();
181
182  LayoutFields(D);
183}
184
/// MakeInfo - Compute the access information for a bit-field of the given
/// size at the given bit offset within a containing type of
/// ContainingTypeSizeInBits bits and ContainingTypeAlign alignment.
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                               const FieldDecl *FD,
                               uint64_t FieldOffset,
                               uint64_t FieldSize,
                               uint64_t ContainingTypeSizeInBits,
                               unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  // Keep adding access components until all bits of the field are covered.
  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    // Advance to the next naturally-aligned access window.
    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
279
280CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
281                                        const FieldDecl *FD,
282                                        uint64_t FieldOffset,
283                                        uint64_t FieldSize) {
284  const RecordDecl *RD = FD->getParent();
285  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
286  uint64_t ContainingTypeSizeInBits = RL.getSize();
287  unsigned ContainingTypeAlign = RL.getAlignment();
288
289  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
290                  ContainingTypeAlign);
291}
292
/// LayoutBitField - Lay out a single bit-field at the given bit offset,
/// appending storage bytes as needed and recording its access info.
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  // Zero-width bit-fields only affect AST layout; no storage is needed.
  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field; only the bits that
    // spill past the previously appended bytes need new storage.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    // The bit-field starts on a fresh byte; append whole bytes to hold it.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
                                                 FieldSize)));

  AppendBytes(NumBytesToAppend);

  // Remember how many trailing bits of the appended storage are still free,
  // so a following bit-field can pack into them.
  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}
333
/// LayoutField - Lay out a single field at the given bit offset. Returns
/// false if the field cannot be placed without switching to a packed
/// struct (the caller then restarts the whole layout with Packed = true).
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  // Track whether the record stays zero-initializable (member pointers).
  CheckZeroInitializable(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger then the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  // A field of record type whose record carries #pragma pack may have an
  // LLVM alignment that disagrees with the AST; force a packed struct then.
  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  // If natural alignment would place the field past its AST offset, only a
  // packed struct can produce the required layout.
  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}
397
398const llvm::Type *
399CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
400                                        const ASTRecordLayout &Layout) {
401  if (Field->isBitField()) {
402    uint64_t FieldSize =
403      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
404
405    // Ignore zero sized bit fields.
406    if (FieldSize == 0)
407      return 0;
408
409    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
410    unsigned NumBytesToAppend =
411      llvm::RoundUpToAlignment(FieldSize, 8) / 8;
412
413    if (NumBytesToAppend > 1)
414      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
415
416    // Add the bit field info.
417    LLVMBitFields.push_back(
418      LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
419                                                       0, FieldSize)));
420    return FieldTy;
421  }
422
423  // This is a regular union field.
424  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
425  return Types.ConvertTypeForMemRecursive(Field->getType());
426}
427
/// LayoutUnion - Lay out a union by picking the single member with the
/// strictest alignment (ties broken by larger size) as the representative
/// LLVM field, then tail-padding up to the union's full size.
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
          "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    // Null means a zero-sized bit-field; it contributes no storage.
    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    // Keep the candidate with the greatest alignment; among equally aligned
    // candidates, prefer the larger one.
    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}
484
485void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
486                                                 uint64_t BaseOffset) {
487  const ASTRecordLayout &Layout =
488    Types.getContext().getASTRecordLayout(BaseDecl);
489
490  uint64_t NonVirtualSize = Layout.getNonVirtualSize();
491
492  if (BaseDecl->isEmpty()) {
493    // FIXME: Lay out empty bases.
494    return;
495  }
496
497  CheckZeroInitializable(BaseDecl);
498
499  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
500  AppendPadding(BaseOffset / 8, 1);
501
502  // Append the base field.
503  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));
504
505  AppendBytes(NonVirtualSize / 8);
506}
507
/// LayoutNonVirtualBases - Lay out the vtable pointer (if any) followed by
/// all non-virtual bases of RD at their AST-assigned offsets.
void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      // Note: VTableTy is already a pointer to function, so the appended
      // field is i32 (...)** - a pointer into the vtable's function pointers.
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // FIXME: Handle a virtual primary base.
      if (!Layout.getPrimaryBaseWasVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
    }
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffsetInBits(BaseDecl));
  }
}
547
548void
549CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
550  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);
551
552  uint64_t AlignedNonVirtualTypeSize =
553    llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
554                             Layout.getNonVirtualAlign()) / 8;
555
556
557  // First check if we can use the same fields as for the complete class.
558  if (AlignedNonVirtualTypeSize == Layout.getSize() / 8) {
559    NonVirtualBaseTypeIsSameAsCompleteType = true;
560    return;
561  }
562
563  NonVirtualBaseFieldTypes = FieldTypes;
564
565  // Check if we need padding.
566  uint64_t AlignedNextFieldOffset =
567    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
568
569  assert(AlignedNextFieldOffset <= AlignedNonVirtualTypeSize &&
570         "Size mismatch!");
571
572  if (AlignedNonVirtualTypeSize == AlignedNextFieldOffset) {
573    // We don't need any padding.
574    return;
575  }
576
577  uint64_t NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
578  NonVirtualBaseFieldTypes.push_back(getByteArrayType(NumBytes));
579
580  printf("nvts: %llu, aligned nfo: %llu\n",
581         AlignedNonVirtualTypeSize, AlignedNextFieldOffset);
582}
583
/// LayoutFields - Lay out the bases and all fields of D. Returns false if
/// some field could not be placed, in which case the caller retries with a
/// packed struct.
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  // For C++ records, the non-virtual bases (and vtable pointer) come first.
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  // We've laid out the non-virtual bases and the fields, now compute the
  // non-virtual base field types.
  if (RD)
    ComputeNonVirtualBaseType(RD);

  // FIXME: Lay out the virtual bases instead of just treating them as tail
  // padding.

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}
618
619void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
620  assert(RecordSize % 8 == 0 && "Invalid record size!");
621
622  uint64_t RecordSizeInBytes = RecordSize / 8;
623  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
624
625  uint64_t AlignedNextFieldOffset =
626    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
627
628  if (AlignedNextFieldOffset == RecordSizeInBytes) {
629    // We don't need any padding.
630    return;
631  }
632
633  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
634  AppendBytes(NumPadBytes);
635}
636
637void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
638                                        const llvm::Type *FieldTy) {
639  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
640                                   getTypeAlignment(FieldTy));
641
642  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);
643
644  FieldTypes.push_back(FieldTy);
645
646  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
647  BitsAvailableInLastField = 0;
648}
649
650void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
651                                          unsigned FieldAlignment) {
652  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
653         "Incorrect field layout!");
654
655  // Round up the field offset to the alignment of the field type.
656  uint64_t AlignedNextFieldOffsetInBytes =
657    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
658
659  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
660    // Even with alignment, the field offset is not at the right place,
661    // insert padding.
662    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
663
664    AppendBytes(PaddingInBytes);
665  }
666}
667
668const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(uint64_t NumBytes) {
669  assert(NumBytes != 0 && "Empty byte array's aren't allowed.");
670
671  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
672  if (NumBytes > 1)
673    Ty = llvm::ArrayType::get(Ty, NumBytes);
674
675  return Ty;
676}
677
678void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
679  if (NumBytes == 0)
680    return;
681
682  // Append the padding field
683  AppendField(NextFieldOffsetInBytes, getByteArrayType(NumBytes));
684}
685
686unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
687  if (Packed)
688    return 1;
689
690  return Types.getTargetData().getABITypeAlignment(Ty);
691}
692
693void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
694  // This record already contains a member pointer.
695  if (!IsZeroInitializable)
696    return;
697
698  // Can only have member pointers if we're compiling C++.
699  if (!Types.getContext().getLangOptions().CPlusPlus)
700    return;
701
702  T = Types.getContext().getBaseElementType(T);
703
704  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
705    if (!Types.getCXXABI().isZeroInitializable(MPT))
706      IsZeroInitializable = false;
707  } else if (const RecordType *RT = T->getAs<RecordType>()) {
708    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
709    CheckZeroInitializable(RD);
710  }
711}
712
713void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
714  // This record already contains a member pointer.
715  if (!IsZeroInitializable)
716    return;
717
718  // FIXME: It would be better if there was a way to explicitly compute the
719  // record layout instead of converting to a type.
720  Types.ConvertTagDeclType(RD);
721
722  const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
723
724  if (!Layout.isZeroInitializable())
725    IsZeroInitializable = false;
726}
727
/// ComputeRecordLayout - Build the CGRecordLayout (LLVM struct type plus
/// field/bit-field/base maps) for the record D, verifying it against the
/// AST layout in debug builds.
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);

  // For C++ records, also form the "*.base" type covering only the
  // non-virtual part (it may simply alias the complete type).
  const llvm::Type *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    if (Builder.NonVirtualBaseTypeIsSameAsCompleteType)
      BaseTy = Ty;
    else if (!Builder.NonVirtualBaseFieldTypes.empty())
      BaseTy = llvm::StructType::get(getLLVMContext(),
                                     Builder.NonVirtualBaseFieldTypes,
                                     Builder.Packed);
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = Layout.getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    uint64_t AlignedNonVirtualTypeSizeInBits =
      llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                               Layout.getNonVirtualAlign());

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}
827
828void CGRecordLayout::print(llvm::raw_ostream &OS) const {
829  OS << "<CGRecordLayout\n";
830  OS << "  LLVMType:" << *LLVMType << "\n";
831  if (BaseLLVMType)
832    OS << "  BaseLLVMType:" << *BaseLLVMType << "\n";
833  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
834  OS << "  BitFields:[\n";
835
836  // Print bit-field infos in declaration order.
837  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
838  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
839         it = BitFields.begin(), ie = BitFields.end();
840       it != ie; ++it) {
841    const RecordDecl *RD = it->first->getParent();
842    unsigned Index = 0;
843    for (RecordDecl::field_iterator
844           it2 = RD->field_begin(); *it2 != it->first; ++it2)
845      ++Index;
846    BFIs.push_back(std::make_pair(Index, &it->second));
847  }
848  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
849  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
850    OS.indent(4);
851    BFIs[i].second->print(OS);
852    OS << "\n";
853  }
854
855  OS << "]>\n";
856}
857
/// dump - Print the record layout to standard error; for debugger use.
void CGRecordLayout::dump() const {
  print(llvm::errs());
}
861
/// print - Print this bit-field's size, signedness, and each access
/// component to the given stream.
void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  // Continuation lines are indented to line up under the opening tag.
  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}
890
/// dump - Print the bit-field info to standard error; for debugger use.
void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}
894