CGExprConstant.cpp revision 106ca049e9a5b10ba80df1b60c0708b2491b7e18
//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                            ConstStructBuilder
//===----------------------------------------------------------------------===//

namespace {
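// ConstStructBuilder incrementally assembles the llvm::Constants for a
// struct or union initializer.  It keeps NextFieldOffsetInBytes in step with
// the AST record layout, inserting undef padding between fields and falling
// back to a packed LLVM struct whenever natural alignment would place a
// field beyond its required offset.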
class ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;
  unsigned NextFieldOffsetInBytes;
  unsigned LLVMStructAlignment;
  std::vector<llvm::Constant *> Elements;
public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     InitListExpr *ILE);

private:
  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
    LLVMStructAlignment(1) { }

  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   llvm::Constant *InitExpr);

  void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      llvm::ConstantInt *InitExpr);

  void AppendPadding(uint64_t NumBytes);

  void AppendTailPadding(CharUnits RecordSize);

  void ConvertStructToPacked();

  bool Build(InitListExpr *ILE);

  unsigned getAlignment(const llvm::Constant *C) const {
    if (Packed)  return 1;
    return CGM.getTargetData().getABITypeAlignment(C->getType());
  }

  uint64_t getSizeInBytes(const llvm::Constant *C) const {
    return CGM.getTargetData().getTypeAllocSize(C->getType());
  }
};

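/// AppendField - Append a non-bit-field member whose offset (in bits) comes
/// from the ASTRecordLayout.  The running offset is first rounded up to the
/// ABI alignment of the initializer's LLVM type; if that overshoots the
/// required offset the whole struct is converted to a packed struct, and if
/// it falls short, undef padding bytes are appended first.  Illustrative
/// sketch (the concrete numbers are an assumption, not taken from a test):
/// for
///   struct { char a; int b __attribute__((aligned(8))); };
/// 'b' lives at byte 8, so seven undef bytes follow the i8 for 'a' before
/// the i32 for 'b' is appended.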
bool ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
            llvm::Constant *InitCst) {
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
         && "Field offset mismatch!");

  unsigned FieldAlignment = getAlignment(InitCst);

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
    assert(!Packed && "Alignment is wrong even with a packed struct!");

    // Convert the struct to a packed struct.
    ConvertStructToPacked();

    AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // We need to append padding.
    AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);

    assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
           "Did not add enough padding!");

    AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
  }

  // Add the field.
  Elements.push_back(InitCst);
  NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes +
                             getSizeInBytes(InitCst);

  if (Packed)
    assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
  else
    LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);

  return true;
}

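/// AppendBitField - Append a bit-field value, packing it into the element
/// list one i8 at a time.  Bits that share a byte with the previous element
/// are OR'd into it (splitting an undef padding array if needed), whole
/// bytes are emitted in endianness order, and any leftover bits end up in a
/// final, partially filled byte.  Rough little-endian illustration (values
/// assumed for the example):
///   struct { unsigned a : 3; unsigned b : 10; } x = { 5, 0x155 };
/// packs into the bytes 0xAD, 0x0A followed by undef tail padding.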
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset,
                                        llvm::ConstantInt *CI) {
  if (FieldOffset > NextFieldOffsetInBytes * 8) {
    // We need to add padding.
    uint64_t NumBytes =
      llvm::RoundUpToAlignment(FieldOffset -
                               NextFieldOffsetInBytes * 8, 8) / 8;

    AppendPadding(NumBytes);
  }

  uint64_t FieldSize =
    Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();

  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (FieldSize > FieldValue.getBitWidth())
    FieldValue = FieldValue.zext(FieldSize);

  // Truncate the size of FieldValue to the bit field size.
  if (FieldSize < FieldValue.getBitWidth())
    FieldValue = FieldValue.trunc(FieldSize);

  if (FieldOffset < NextFieldOffsetInBytes * 8) {
    // Either part of the field or the entire field can go into the previous
    // byte.
    assert(!Elements.empty() && "Elements can't be empty!");

    unsigned BitsInPreviousByte =
      NextFieldOffsetInBytes * 8 - FieldOffset;

    bool FitsCompletelyInPreviousByte =
      BitsInPreviousByte >= FieldValue.getBitWidth();

    llvm::APInt Tmp = FieldValue;

    if (!FitsCompletelyInPreviousByte) {
      unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

      if (CGM.getTargetData().isBigEndian()) {
        Tmp = Tmp.lshr(NewFieldWidth);
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining high bits.
        FieldValue = FieldValue.trunc(NewFieldWidth);
      } else {
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining low bits.
        FieldValue = FieldValue.lshr(BitsInPreviousByte);
        FieldValue = FieldValue.trunc(NewFieldWidth);
      }
    }

    Tmp = Tmp.zext(8);
    if (CGM.getTargetData().isBigEndian()) {
      if (FitsCompletelyInPreviousByte)
        Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
    } else {
      Tmp = Tmp.shl(8 - BitsInPreviousByte);
    }

    // 'or' in the bits that go into the previous byte.
    llvm::Value *LastElt = Elements.back();
    if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
      Tmp |= Val->getValue();
    else {
      assert(isa<llvm::UndefValue>(LastElt));
      // If there is an undef field that we're adding to, it can either be a
      // scalar undef (in which case, we just replace it with our field) or it
      // is an array.  If it is an array, we have to pull one byte off the
      // array so that the other undef bytes stay around.
      if (!isa<llvm::IntegerType>(LastElt->getType())) {
        // The undef padding will be a multibyte array, create a new smaller
        // padding and then a hole for our i8 to get plopped into.
        assert(isa<llvm::ArrayType>(LastElt->getType()) &&
               "Expected array padding of undefs");
        const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
        assert(AT->getElementType()->isIntegerTy(8) &&
               AT->getNumElements() != 0 &&
               "Expected non-empty array padding of undefs");

        // Remove the padding array.
        NextFieldOffsetInBytes -= AT->getNumElements();
        Elements.pop_back();

        // Add the padding back in two chunks.
        AppendPadding(AT->getNumElements()-1);
        AppendPadding(1);
        assert(isa<llvm::UndefValue>(Elements.back()) &&
               Elements.back()->getType()->isIntegerTy(8) &&
               "Padding addition didn't work right");
      }
    }

    Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

    if (FitsCompletelyInPreviousByte)
      return;
  }

  while (FieldValue.getBitWidth() > 8) {
    llvm::APInt Tmp;

    if (CGM.getTargetData().isBigEndian()) {
      // We want the high bits.
      Tmp = FieldValue.lshr(FieldValue.getBitWidth() - 8).trunc(8);
    } else {
      // We want the low bits.
      Tmp = FieldValue.trunc(8);

      FieldValue = FieldValue.lshr(8);
    }

    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
    NextFieldOffsetInBytes++;

    FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - 8);
  }

  assert(FieldValue.getBitWidth() > 0 &&
         "Should have at least one bit left!");
  assert(FieldValue.getBitWidth() <= 8 &&
         "Should not have more than a byte left!");

  if (FieldValue.getBitWidth() < 8) {
    if (CGM.getTargetData().isBigEndian()) {
      unsigned BitWidth = FieldValue.getBitWidth();

      FieldValue = FieldValue.zext(8) << (8 - BitWidth);
    } else
      FieldValue = FieldValue.zext(8);
  }

  // Append the last element.
  Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                            FieldValue));
  NextFieldOffsetInBytes++;
}

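/// AppendPadding - Append NumBytes of padding, as a single undef i8 or an
/// undef [N x i8] array.  Undef (rather than zero) is used so the padding is
/// recognizable later: AppendBitField above splits such an array when a
/// bit-field has to share its final byte.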
void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
  if (!NumBytes)
    return;

  const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  llvm::Constant *C = llvm::UndefValue::get(Ty);
  Elements.push_back(C);
  assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");

  NextFieldOffsetInBytes += getSizeInBytes(C);
}

void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
  assert(NextFieldOffsetInBytes <= RecordSize.getQuantity() &&
         "Size mismatch!");

  unsigned NumPadBytes = RecordSize.getQuantity() - NextFieldOffsetInBytes;
  AppendPadding(NumPadBytes);
}

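/// ConvertStructToPacked - Re-lay out the elements gathered so far for a
/// packed (alignment 1) struct, making every padding byte explicit as undef
/// i8s, since a packed LLVM struct no longer gets implicit ABI padding
/// between elements.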
void ConstStructBuilder::ConvertStructToPacked() {
  std::vector<llvm::Constant *> PackedElements;
  uint64_t ElementOffsetInBytes = 0;

  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    llvm::Constant *C = Elements[i];

    unsigned ElementAlign =
      CGM.getTargetData().getABITypeAlignment(C->getType());
    uint64_t AlignedElementOffsetInBytes =
      llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);

    if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
      // We need some padding.
      uint64_t NumBytes =
        AlignedElementOffsetInBytes - ElementOffsetInBytes;

      const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
      if (NumBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumBytes);

      llvm::Constant *Padding = llvm::UndefValue::get(Ty);
      PackedElements.push_back(Padding);
      ElementOffsetInBytes += getSizeInBytes(Padding);
    }

    PackedElements.push_back(C);
    ElementOffsetInBytes += getSizeInBytes(C);
  }

  assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
         "Packing the struct changed its size!");

  Elements = PackedElements;
  LLVMStructAlignment = 1;
  Packed = true;
}

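/// Build - Walk the record's fields in declaration order, pair each one with
/// its initializer from the InitListExpr (or an implicit null constant), and
/// dispatch to AppendField or AppendBitField.  Afterwards the struct may
/// still have to be packed, if rounding its current size up to the LLVM
/// struct alignment would exceed the AST layout size, and tail padding is
/// appended so the two sizes agree exactly.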
bool ConstStructBuilder::Build(InitListExpr *ILE) {
  RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  unsigned FieldNo = 0;
  unsigned ElementNo = 0;
  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {

    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
      continue;

    // Don't emit anonymous bitfields, they just affect layout.
    if (Field->isBitField() && !Field->getIdentifier())
      continue;

    // Get the initializer.  A struct can include fields without initializers,
    // we just use explicit null values for them.
    llvm::Constant *EltInit;
    if (ElementNo < ILE->getNumInits())
      EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
                                     Field->getType(), CGF);
    else
      EltInit = CGM.EmitNullConstant(Field->getType());

    if (!EltInit)
      return false;

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
        return false;
    } else {
      // Otherwise we have a bitfield.
      AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
                     cast<llvm::ConstantInt>(EltInit));
    }
  }

  uint64_t LayoutSizeInBytes = Layout.getSize().getQuantity();

  if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
    // If the struct is bigger than the size of the record type,
    // we must have a flexible array member at the end.
    assert(RD->hasFlexibleArrayMember() &&
           "Must have flexible array member if struct is bigger than type!");

    // No tail padding is necessary.
    return true;
  }

  uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
                                                      LLVMStructAlignment);

  // Check if we need to convert the struct to a packed struct.
  if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
      LLVMSizeInBytes > LayoutSizeInBytes) {
    assert(!Packed && "Size mismatch!");

    ConvertStructToPacked();
    assert(NextFieldOffsetInBytes <= LayoutSizeInBytes &&
           "Converting to packed did not help!");
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  assert(Layout.getSize().getQuantity() == NextFieldOffsetInBytes &&
         "Tail padding mismatch!");

  return true;
}

llvm::Constant *ConstStructBuilder::
  BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
  ConstStructBuilder Builder(CGM, CGF);

  if (!Builder.Build(ILE))
    return 0;

  llvm::Constant *Result =
  llvm::ConstantStruct::get(CGM.getLLVMContext(),
                            Builder.Elements, Builder.Packed);

  assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
                                  Builder.getAlignment(Result)) ==
         Builder.getSizeInBytes(Result) && "Size mismatch!");

  return Result;
}


//===----------------------------------------------------------------------===//
//                             ConstExprEmitter
//===----------------------------------------------------------------------===//

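// ConstExprEmitter folds expressions that the AST-level evaluator used by
// EmitConstantExpr (below) cannot handle -- init lists, compound literals,
// the GCC cast-to-union extension, address-of-label arithmetic, and so on --
// directly into llvm::Constants.  A null result means the expression could
// not be emitted as a constant.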
class ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
    if (E->getType()->isMemberPointerType())
      return CGM.getMemberPointerConstant(E);

    return 0;
  }

  llvm::Constant *VisitBinSub(BinaryOperator *E) {
    // This must be a pointer/pointer subtraction.  This only happens for
    // address of label.
    if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
       !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
      return 0;

    llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
                                               E->getLHS()->getType(), CGF);
    llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
                                               E->getRHS()->getType(), CGF);

    const llvm::Type *ResultType = ConvertType(E->getType());
    LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
    RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);

    // No need to divide by element size, since addr of label is always void*,
    // which has size 1 in GNUish.
    return llvm::ConstantExpr::getSub(LHS, RHS);
  }

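  // Casts are mostly member-pointer adjustments or no-ops here; the one
  // structural case is the GCC cast-to-union extension, e.g. (illustrative)
  //
  //   union U { int i; double d; };
  //   union U u = (union U)1;
  //
  // which is emitted as an anonymous struct of the initialized member
  // followed by undef padding up to the union's size.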
  llvm::Constant *VisitCastExpr(CastExpr* E) {
    switch (E->getCastKind()) {
    case CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();

      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C)
        return 0;

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size
      std::vector<llvm::Constant*> Elts;
      std::vector<const llvm::Type*> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::UndefValue::get(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy =
        llvm::StructType::get(C->getType()->getContext(), Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }
    case CK_NullToMemberPointer: {
      const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
      return CGM.getCXXABI().EmitNullMemberPointer(MPT);
    }

    case CK_BaseToDerivedMemberPointer: {
      Expr *SubExpr = E->getSubExpr();
      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C) return 0;

      return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
    }

    case CK_BitCast:
      // This must be a member function pointer cast.
      return Visit(E->getSubExpr());

    default: {
      // FIXME: This should be handled by the CK_NoOp cast kind.
      // Explicit and implicit no-op casts
      QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
      if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
        return Visit(E->getSubExpr());

      // Handle integer->integer casts for address-of-label differences.
      if (Ty->isIntegerType() && SubTy->isIntegerType() &&
          CGF) {
        llvm::Value *Src = Visit(E->getSubExpr());
        if (Src == 0) return 0;

        // Use EmitScalarConversion to perform the conversion.
        return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
      }

      return 0;
    }
    }
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

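  // Array initializers: explicitly written elements are emitted in order and
  // the rest are null-filled.  If the element constants do not all have the
  // array's element type (for instance when an element is a union that was
  // emitted as an anonymous struct), the result is emitted as an anonymous
  // packed struct of the individual element types rather than a true array.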
  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    unsigned NumInitElements = ILE->getNumInits();
    if (NumInitElements == 1 && ILE->getType() == ILE->getInit(0)->getType() &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))))
      return Visit(ILE->getInit(0));

    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
                                                            Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isRecordType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    // If ILE was a constant vector, we would have handled it already.
    if (ILE->getType()->isVectorType())
      return 0;

    assert(0 && "Unable to handle InitListExpr");
    // Get rid of control reaches end of void function warning.
    // Not reached.
    return 0;
  }

  llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
    if (!E->getConstructor()->isTrivial())
      return 0;

    QualType Ty = E->getType();

    // FIXME: We should not have to call getBaseElementType here.
    const RecordType *RT =
      CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    // If the class doesn't have a trivial destructor, we can't emit it as a
    // constant expr.
    if (!RD->hasTrivialDestructor())
      return 0;

    // Only copy and default constructors can be trivial.


    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyConstructor() &&
             "trivial ctor has argument but isn't a copy ctor");

      Expr *Arg = E->getArg(0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      return Visit(Arg);
    }

    return CGM.EmitNullConstant(Ty);
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return llvm::ConstantArray::get(VMContext,
                                    CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantArray::get(VMContext, Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstant(CGM.getContext()),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral", 0, false,
                                     E->getType().getAddressSpace());
      return C;
    }
    case Expr::DeclRefExprClass: {
      ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (Decl->hasAttr<WeakRefAttr>())
        return CGM.GetWeakRefReference(Decl);
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(FD);
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isLocalVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C =
          CGM.getObjCRuntime().GenerateConstantString(SL->getString());
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
      if (CGF) {
        LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
        return cast<llvm::Constant>(Res.getAddress());
      } else if (Type == PredefinedExpr::PrettyFunction) {
        return CGM.GetAddrOfConstantCString("top level", ".tmp");
      }

      return CGM.GetAddrOfConstantCString("", ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      llvm::Constant *Ptr =
        CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      unsigned builtin = CE->isBuiltinCall(CGM.getContext());
      if (builtin !=
            Builtin::BI__builtin___CFStringMakeConstantString &&
          builtin !=
            Builtin::BI__builtin___NSStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      if (builtin ==
            Builtin::BI__builtin___NSStringMakeConstantString) {
        return CGM.getObjCRuntime().GenerateConstantString(Literal);
      }
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};

}  // end anonymous namespace.

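/// EmitConstantExpr - Try to emit E as an llvm::Constant.  The expression is
/// first handed to the AST-level evaluator (Expr::Evaluate, or
/// EvaluateAsLValue for reference types), and a successful APValue is
/// translated kind by kind (LValue, Int, Float, the complex kinds, Vector)
/// below.  Only if that fails does the ConstExprEmitter visitor above take a
/// crack at the expression tree directly.  Either way, i1 results are
/// zero-extended to the in-memory bool type.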
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success && !Result.HasSideEffects) {
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
    case APValue::LValue: {
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                               Result.Val.getLValueOffset().getQuantity());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
          llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
          Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
          C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getBitCast(C, DestTy);

        return llvm::ConstantExpr::getPtrToInt(C, DestTy);
      } else {
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return llvm::ConstantExpr::getTrunc(C, DestTy);

        return C;
      }
    }
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      if (C->getType()->isIntegerTy(1)) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = llvm::ConstantExpr::getZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      // FIXME: the target may want to specify that this is packed.
      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
    }
    case APValue::Float:
      return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatReal());
      Complex[1] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatImag());

      // FIXME: the target may want to specify that this is packed.
      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
    }
    case APValue::Vector: {
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
      }
      return llvm::ConstantVector::get(Inits);
    }
    }
  }

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType()->isIntegerTy(1)) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

static uint64_t getFieldOffset(ASTContext &C, const FieldDecl *field) {
  const ASTRecordLayout &layout = C.getASTRecordLayout(field->getParent());
  return layout.getFieldOffset(field->getFieldIndex());
}

llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
  // Member pointer constants always have a very particular form.
  const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
  const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();

  // A member function pointer.
  if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
    return getCXXABI().EmitMemberPointer(method);

  // Otherwise, a member data pointer.
  uint64_t fieldOffset;
  if (const FieldDecl *field = dyn_cast<FieldDecl>(decl))
    fieldOffset = getFieldOffset(getContext(), field);
  else {
    const IndirectFieldDecl *ifield = cast<IndirectFieldDecl>(decl);

    fieldOffset = 0;
    for (IndirectFieldDecl::chain_iterator ci = ifield->chain_begin(),
           ce = ifield->chain_end(); ci != ce; ++ci)
      fieldOffset += getFieldOffset(getContext(), cast<FieldDecl>(*ci));
  }

  CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
  return getCXXABI().EmitMemberDataPointer(type, chars);
}

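/// FillInNullDataMemberPointers - Given the i8-based elements of a null
/// constant, overwrite the bytes covering each pointer-to-data-member inside
/// T with -1 (its null value under the Itanium ABI, as the FIXME below
/// notes), recursing through constant arrays, bases, and fields to find
/// them.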
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
                             std::vector<llvm::Constant *> &Elements,
                             uint64_t StartOffset) {
  assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");

  if (CGM.getTypes().isZeroInitializable(T))
    return;

  if (const ConstantArrayType *CAT =
        CGM.getContext().getAsConstantArrayType(T)) {
    QualType ElementTy = CAT->getElementType();
    uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);

    for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
      FillInNullDataMemberPointers(CGM, ElementTy, Elements,
                                   StartOffset + I * ElementSize);
    }
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    // Go through all bases and fill in any null pointer to data members.
    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
         E = RD->bases_end(); I != E; ++I) {
      if (I->isVirtual()) {
        // Ignore virtual bases.
        continue;
      }

      const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (BaseDecl->isEmpty())
        continue;

      // Ignore bases that don't have any pointer to data members.
      if (CGM.getTypes().isZeroInitializable(BaseDecl))
        continue;

      uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
      FillInNullDataMemberPointers(CGM, I->getType(),
                                   Elements, StartOffset + BaseOffset);
    }

    // Visit all fields.
    unsigned FieldNo = 0;
    for (RecordDecl::field_iterator I = RD->field_begin(),
         E = RD->field_end(); I != E; ++I, ++FieldNo) {
      QualType FieldType = I->getType();

      if (CGM.getTypes().isZeroInitializable(FieldType))
        continue;

      uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
      FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
    }
  } else {
    assert(T->isMemberPointerType() && "Should only see member pointers here!");
    assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
           "Should only see pointers to data members here!");

    uint64_t StartIndex = StartOffset / 8;
    uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;

    // FIXME: hardcodes Itanium member pointer representation!
    llvm::Constant *NegativeOne =
      llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
                             -1ULL, /*isSigned*/true);

    // Fill in the null data member pointer.
    for (uint64_t I = StartIndex; I != EndIndex; ++I)
      Elements[I] = NegativeOne;
  }
}

static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               const llvm::Type *baseType,
                                               const CXXRecordDecl *base);

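/// EmitNullConstant - Build the "all null" constant for a C++ record against
/// its CGRecordLayout: non-virtual bases and fields first, virtual bases
/// when emitting a complete object, and finally plain zero for any slot
/// (padding, vtable pointer, and so on) still left unset.  Bases and fields
/// containing pointers to data members are the reason this cannot simply be
/// llvm::Constant::getNullValue of the struct type.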
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
                                        const CXXRecordDecl *record,
                                        bool asCompleteObject) {
  const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
  const llvm::StructType *structure =
    (asCompleteObject ? layout.getLLVMType()
                      : layout.getBaseSubobjectLLVMType());

  unsigned numElements = structure->getNumElements();
  std::vector<llvm::Constant *> elements(numElements);

  // Fill in all the bases.
  for (CXXRecordDecl::base_class_const_iterator
         I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
    if (I->isVirtual()) {
      // Ignore virtual bases; if we're laying out for a complete
      // object, we'll lay these out later.
      continue;
    }

    const CXXRecordDecl *base =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    // Ignore empty bases.
    if (base->isEmpty())
      continue;

    unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
    const llvm::Type *baseType = structure->getElementType(fieldIndex);
    elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
  }

  // Fill in all the fields.
  for (RecordDecl::field_iterator I = record->field_begin(),
         E = record->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;

    // Ignore bit fields.
    if (field->isBitField())
      continue;

    unsigned fieldIndex = layout.getLLVMFieldNo(field);
    elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
  }

  // Fill in the virtual bases, if we're working with the complete object.
  if (asCompleteObject) {
    for (CXXRecordDecl::base_class_const_iterator
           I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
      const CXXRecordDecl *base =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (base->isEmpty())
        continue;

      unsigned fieldIndex = layout.getVirtualBaseIndex(base);

      // We might have already laid this field out.
      if (elements[fieldIndex]) continue;

      const llvm::Type *baseType = structure->getElementType(fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Now go through all other fields and zero them out.
  for (unsigned i = 0; i != numElements; ++i) {
    if (!elements[i])
      elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
  }

  return llvm::ConstantStruct::get(structure, elements);
}

/// Emit the null constant for a base subobject.
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               const llvm::Type *baseType,
                                               const CXXRecordDecl *base) {
  const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);

  // Just zero out bases that don't have any pointer to data members.
  if (baseLayout.isZeroInitializableAsBase())
    return llvm::Constant::getNullValue(baseType);

  // If the base type is a struct, we can just use its null constant.
  if (isa<llvm::StructType>(baseType)) {
    return EmitNullConstant(CGM, base, /*complete*/ false);
  }

  // Otherwise, some bases are represented as arrays of i8 if the size
  // of the base is smaller than its corresponding LLVM type.  Figure
  // out how many elements this base array has.
  const llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
  unsigned numBaseElements = baseArrayType->getNumElements();

  // Fill in null data member pointers.
  std::vector<llvm::Constant *> baseElements(numBaseElements);
  FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
                               baseElements, 0);

  // Now go through all other elements and zero them out.
  if (numBaseElements) {
    const llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
    llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
    for (unsigned i = 0; i != numBaseElements; ++i) {
      if (!baseElements[i])
        baseElements[i] = i8_zero;
    }
  }

  return llvm::ConstantArray::get(baseArrayType, baseElements);
}

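/// EmitNullConstant - Return the constant "zero-initialized" value for T.
/// For most types this is llvm::Constant::getNullValue; the interesting
/// cases are constant arrays, records, and pointers to data members, whose
/// null value under the Itanium C++ ABI is -1 rather than all-zero bits.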
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  if (getTypes().isZeroInitializable(T))
    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));

  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {

    QualType ElementTy = CAT->getElementType();

    llvm::Constant *Element = EmitNullConstant(ElementTy);
    unsigned NumElements = CAT->getSize().getZExtValue();
    std::vector<llvm::Constant *> Array(NumElements);
    for (unsigned i = 0; i != NumElements; ++i)
      Array[i] = Element;

    const llvm::ArrayType *ATy =
      cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
    return llvm::ConstantArray::get(ATy, Array);
  }

  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return ::EmitNullConstant(*this, RD, /*complete object*/ true);
  }

  assert(T->isMemberPointerType() && "Should only see member pointers here!");
  assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
         "Should only see pointers to data members here!");

  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}
1187