CGExprConstant.cpp revision d608cdb7c044365cf4e8764ade1e11e99c176078
//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                            ConstStructBuilder
//===----------------------------------------------------------------------===//

namespace {
class ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;
  unsigned NextFieldOffsetInBytes;
  unsigned LLVMStructAlignment;
  std::vector<llvm::Constant *> Elements;
public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     InitListExpr *ILE);

private:
  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
    LLVMStructAlignment(1) { }

  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   llvm::Constant *InitExpr);

  void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      llvm::ConstantInt *InitExpr);

  void AppendPadding(uint64_t NumBytes);

  void AppendTailPadding(uint64_t RecordSize);

  void ConvertStructToPacked();

  bool Build(InitListExpr *ILE);

  unsigned getAlignment(const llvm::Constant *C) const {
    if (Packed)  return 1;
    return CGM.getTargetData().getABITypeAlignment(C->getType());
  }

  uint64_t getSizeInBytes(const llvm::Constant *C) const {
    return CGM.getTargetData().getTypeAllocSize(C->getType());
  }
};

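// Append a non-bitfield member's constant initializer at the given bit
// offset, adding undef padding or converting the layout to a packed struct
// as needed so the emitted field lands at the offset the AST record layout
// expects.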
bool ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
            llvm::Constant *InitCst) {
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
         && "Field offset mismatch!");

  unsigned FieldAlignment = getAlignment(InitCst);

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
    assert(!Packed && "Alignment is wrong even with a packed struct!");

    // Convert the struct to a packed struct.
    ConvertStructToPacked();

    AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // We need to append padding.
    AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);

    assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
           "Did not add enough padding!");

    AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
  }

  // Add the field.
  Elements.push_back(InitCst);
  NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes +
                             getSizeInBytes(InitCst);

  if (Packed)
    assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
  else
    LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);

  return true;
}

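// Append a bitfield member's value, emitted byte by byte as i8 constants:
// bits that share a byte with the previous element are OR'd into it
// (honoring target endianness), and the remaining bits are split into whole
// bytes plus a final partial byte.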
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset,
                                        llvm::ConstantInt *CI) {
  if (FieldOffset > NextFieldOffsetInBytes * 8) {
    // We need to add padding.
    uint64_t NumBytes =
      llvm::RoundUpToAlignment(FieldOffset -
                               NextFieldOffsetInBytes * 8, 8) / 8;

    AppendPadding(NumBytes);
  }

  uint64_t FieldSize =
    Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();

  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (FieldSize > FieldValue.getBitWidth())
    FieldValue.zext(FieldSize);

  // Truncate the size of FieldValue to the bit field size.
  if (FieldSize < FieldValue.getBitWidth())
    FieldValue.trunc(FieldSize);

  if (FieldOffset < NextFieldOffsetInBytes * 8) {
    // Either part of the field or the entire field can go into the previous
    // byte.
    assert(!Elements.empty() && "Elements can't be empty!");

    unsigned BitsInPreviousByte =
      NextFieldOffsetInBytes * 8 - FieldOffset;

    bool FitsCompletelyInPreviousByte =
      BitsInPreviousByte >= FieldValue.getBitWidth();

    llvm::APInt Tmp = FieldValue;

    if (!FitsCompletelyInPreviousByte) {
      unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

      if (CGM.getTargetData().isBigEndian()) {
        Tmp = Tmp.lshr(NewFieldWidth);
        Tmp.trunc(BitsInPreviousByte);

        // We want the remaining high bits.
        FieldValue.trunc(NewFieldWidth);
      } else {
        Tmp.trunc(BitsInPreviousByte);

        // We want the remaining low bits.
        FieldValue = FieldValue.lshr(BitsInPreviousByte);
        FieldValue.trunc(NewFieldWidth);
      }
    }

    Tmp.zext(8);
    if (CGM.getTargetData().isBigEndian()) {
      if (FitsCompletelyInPreviousByte)
        Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
    } else {
      Tmp = Tmp.shl(8 - BitsInPreviousByte);
    }

    // 'or' in the bits that go into the previous byte.
    llvm::Value *LastElt = Elements.back();
    if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
      Tmp |= Val->getValue();
    else {
      assert(isa<llvm::UndefValue>(LastElt));
      // If there is an undef field that we're adding to, it can either be a
      // scalar undef (in which case, we just replace it with our field) or it
      // is an array.  If it is an array, we have to pull one byte off the
      // array so that the other undef bytes stay around.
      if (!isa<llvm::IntegerType>(LastElt->getType())) {
        // The undef padding will be a multibyte array; create a new smaller
        // padding and then a hole for our i8 to get plopped into.
        assert(isa<llvm::ArrayType>(LastElt->getType()) &&
               "Expected array padding of undefs");
        const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
        assert(AT->getElementType()->isIntegerTy(8) &&
               AT->getNumElements() != 0 &&
               "Expected non-empty array padding of undefs");

        // Remove the padding array.
        NextFieldOffsetInBytes -= AT->getNumElements();
        Elements.pop_back();

        // Add the padding back in two chunks.
        AppendPadding(AT->getNumElements()-1);
        AppendPadding(1);
        assert(isa<llvm::UndefValue>(Elements.back()) &&
               Elements.back()->getType()->isIntegerTy(8) &&
               "Padding addition didn't work right");
      }
    }

    Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

    if (FitsCompletelyInPreviousByte)
      return;
  }

  while (FieldValue.getBitWidth() > 8) {
    llvm::APInt Tmp;

    if (CGM.getTargetData().isBigEndian()) {
      // We want the high bits.
      Tmp = FieldValue;
      Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
      Tmp.trunc(8);
    } else {
      // We want the low bits.
      Tmp = FieldValue;
      Tmp.trunc(8);

      FieldValue = FieldValue.lshr(8);
    }

    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
    NextFieldOffsetInBytes++;

    FieldValue.trunc(FieldValue.getBitWidth() - 8);
  }

  assert(FieldValue.getBitWidth() > 0 &&
         "Should have at least one bit left!");
  assert(FieldValue.getBitWidth() <= 8 &&
         "Should not have more than a byte left!");

  if (FieldValue.getBitWidth() < 8) {
    if (CGM.getTargetData().isBigEndian()) {
      unsigned BitWidth = FieldValue.getBitWidth();

      FieldValue.zext(8);
      FieldValue = FieldValue << (8 - BitWidth);
    } else
      FieldValue.zext(8);
  }

  // Append the last element.
  Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                            FieldValue));
  NextFieldOffsetInBytes++;
}

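// Append NumBytes of undef padding, as a single i8 or an [N x i8] array, so
// the padding never raises the struct's alignment.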
void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
  if (!NumBytes)
    return;

  const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  llvm::Constant *C = llvm::UndefValue::get(Ty);
  Elements.push_back(C);
  assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");

  NextFieldOffsetInBytes += getSizeInBytes(C);
}

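// Pad the initializer out to the record's total size (given in bits) so the
// emitted constant also covers any tail padding in the AST layout.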
void ConstStructBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendPadding(NumPadBytes);
}

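// Rebuild the elements gathered so far with explicit undef padding between
// them, so the initializer can be emitted as a packed LLVM struct whose
// field offsets no longer depend on ABI alignment.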
void ConstStructBuilder::ConvertStructToPacked() {
  std::vector<llvm::Constant *> PackedElements;
  uint64_t ElementOffsetInBytes = 0;

  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    llvm::Constant *C = Elements[i];

    unsigned ElementAlign =
      CGM.getTargetData().getABITypeAlignment(C->getType());
    uint64_t AlignedElementOffsetInBytes =
      llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);

    if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
      // We need some padding.
      uint64_t NumBytes =
        AlignedElementOffsetInBytes - ElementOffsetInBytes;

      const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
      if (NumBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumBytes);

      llvm::Constant *Padding = llvm::UndefValue::get(Ty);
      PackedElements.push_back(Padding);
      ElementOffsetInBytes += getSizeInBytes(Padding);
    }

    PackedElements.push_back(C);
    ElementOffsetInBytes += getSizeInBytes(C);
  }

  assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
         "Packing the struct changed its size!");

  Elements = PackedElements;
  LLVMStructAlignment = 1;
  Packed = true;
}

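// Walk the record's fields in declaration order, pair each one with its
// initializer from the InitListExpr (or a null constant if none is given),
// and append it at the offset recorded in the AST layout. Returns false if
// any initializer cannot be emitted as a constant.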
bool ConstStructBuilder::Build(InitListExpr *ILE) {
  RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  unsigned FieldNo = 0;
  unsigned ElementNo = 0;
  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {

    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
      continue;

    // Don't emit anonymous bitfields, they just affect layout.
    if (Field->isBitField() && !Field->getIdentifier())
      continue;

    // Get the initializer.  A struct can include fields without initializers;
    // we just use explicit null values for them.
    llvm::Constant *EltInit;
    if (ElementNo < ILE->getNumInits())
      EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
                                     Field->getType(), CGF);
    else
      EltInit = CGM.EmitNullConstant(Field->getType());

    if (!EltInit)
      return false;

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
        return false;
    } else {
      // Otherwise we have a bitfield.
      AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
                     cast<llvm::ConstantInt>(EltInit));
    }
  }

  uint64_t LayoutSizeInBytes = Layout.getSize() / 8;

  if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
    // If the struct is bigger than the size of the record type,
    // we must have a flexible array member at the end.
    assert(RD->hasFlexibleArrayMember() &&
           "Must have flexible array member if struct is bigger than type!");

    // No tail padding is necessary.
    return true;
  }

  uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
                                                      LLVMStructAlignment);

  // Check if we need to convert the struct to a packed struct.
  if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
      LLVMSizeInBytes > LayoutSizeInBytes) {
    assert(!Packed && "Size mismatch!");

    ConvertStructToPacked();
    assert(NextFieldOffsetInBytes <= LayoutSizeInBytes &&
           "Converting to packed did not help!");
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
         "Tail padding mismatch!");

  return true;
}

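// Entry point: build an LLVM constant for a struct or union initializer
// list, or return null if it cannot be emitted as a constant.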
llvm::Constant *ConstStructBuilder::
  BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
  ConstStructBuilder Builder(CGM, CGF);

  if (!Builder.Build(ILE))
    return 0;

  llvm::Constant *Result =
  llvm::ConstantStruct::get(CGM.getLLVMContext(),
                            Builder.Elements, Builder.Packed);

  assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
                                  Builder.getAlignment(Result)) ==
         Builder.getSizeInBytes(Result) && "Size mismatch!");

  return Result;
}


//===----------------------------------------------------------------------===//
//                             ConstExprEmitter
//===----------------------------------------------------------------------===//

class ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *EmitMemberFunctionPointer(CXXMethodDecl *MD) {
    return CGM.getCXXABI().EmitMemberFunctionPointer(MD);
  }

  llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
    if (const MemberPointerType *MPT =
          E->getType()->getAs<MemberPointerType>()) {
      QualType T = MPT->getPointeeType();
      DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());

      NamedDecl *ND = DRE->getDecl();
      if (T->isFunctionProtoType())
        return EmitMemberFunctionPointer(cast<CXXMethodDecl>(ND));

      // We have a pointer to data member.
      return CGM.EmitPointerToDataMember(cast<FieldDecl>(ND));
    }

    return 0;
  }

  llvm::Constant *VisitBinSub(BinaryOperator *E) {
    // This must be a pointer/pointer subtraction.  This only happens for
    // address of label.
    if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
       !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
      return 0;

    llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
                                               E->getLHS()->getType(), CGF);
    llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
                                               E->getRHS()->getType(), CGF);

    const llvm::Type *ResultType = ConvertType(E->getType());
    LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
    RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);

    // No need to divide by element size, since addr of label is always void*,
    // which has size 1 in GNUish.
    return llvm::ConstantExpr::getSub(LHS, RHS);
  }

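  // Handle the cast kinds that can show up in constant initializers: the GCC
  // cast-to-union extension, null and base-to-derived member pointer casts,
  // member function pointer bitcasts, no-op casts, and (when a
  // CodeGenFunction is available) integer conversions for address-of-label
  // differences. Anything else yields null.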
  llvm::Constant *VisitCastExpr(CastExpr* E) {
    switch (E->getCastKind()) {
    case CastExpr::CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();

      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C)
        return 0;

      // Build a struct with the union sub-element as the first member,
      // padded out to the appropriate size.
      std::vector<llvm::Constant*> Elts;
      std::vector<const llvm::Type*> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::UndefValue::get(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy =
        llvm::StructType::get(C->getType()->getContext(), Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }
    case CastExpr::CK_NullToMemberPointer: {
      const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
      if (MPT->getPointeeType()->isFunctionType())
        return CGM.getCXXABI().EmitNullMemberFunctionPointer(MPT);
      return CGM.EmitNullConstant(E->getType());
    }

    case CastExpr::CK_BaseToDerivedMemberPointer: {
      const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();

      // TODO: support data-member conversions here!
      if (!MPT->getPointeeType()->isFunctionType())
        return 0;

      Expr *SubExpr = E->getSubExpr();
      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C) return 0;

      return CGM.getCXXABI().EmitMemberFunctionPointerConversion(C, E);
    }

    case CastExpr::CK_BitCast:
      // This must be a member function pointer cast.
      return Visit(E->getSubExpr());

    default: {
      // FIXME: This should be handled by the CK_NoOp cast kind.
      // Explicit and implicit no-op casts
      QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
      if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
        return Visit(E->getSubExpr());

      // Handle integer->integer casts for address-of-label differences.
      if (Ty->isIntegerType() && SubTy->isIntegerType() &&
          CGF) {
        llvm::Value *Src = Visit(E->getSubExpr());
        if (Src == 0) return 0;

        // Use EmitScalarConversion to perform the conversion.
        return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
      }

      return 0;
    }
    }
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

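  // Emit an array initializer. Missing trailing elements are filled with
  // null constants; if the element constants do not all have the array's
  // element type, the whole initializer is emitted as a packed struct
  // instead of an array.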
  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    unsigned NumInitElements = ILE->getNumInits();
    if (NumInitElements == 1 &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))))
      return Visit(ILE->getInit(0));

    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
                                                            Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isRecordType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    // If ILE was a constant vector, we would have handled it already.
    if (ILE->getType()->isVectorType())
      return 0;

    assert(0 && "Unable to handle InitListExpr");
    // Get rid of control reaches end of void function warning.
    // Not reached.
    return 0;
  }

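  // A CXXConstructExpr can only be folded when it uses a trivial constructor
  // and the class has a trivial destructor: a trivial copy constructor just
  // forwards to its argument, and a trivial default constructor yields the
  // null constant.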
  llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
    if (!E->getConstructor()->isTrivial())
      return 0;

    QualType Ty = E->getType();

    // FIXME: We should not have to call getBaseElementType here.
    const RecordType *RT =
      CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    // If the class doesn't have a trivial destructor, we can't emit it as a
    // constant expr.
    if (!RD->hasTrivialDestructor())
      return 0;

    // Only copy and default constructors can be trivial.


    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyConstructor() &&
             "trivial ctor has argument but isn't a copy ctor");

      Expr *Arg = E->getArg(0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      return Visit(Arg);
    }

    return CGM.EmitNullConstant(Ty);
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return llvm::ConstantArray::get(VMContext,
                                    CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantArray::get(VMContext, Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
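  // Emit the address of an lvalue expression as a constant: globals,
  // functions, string/@encode/Objective-C string literals, compound literals
  // (materialized as internal globals), predefined identifiers, labels,
  // blocks, and the CFString/NSString constant string builtins. Returns null
  // for anything it cannot handle.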
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstant(CGM.getContext()),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral", 0, false,
                                     E->getType().getAddressSpace());
      return C;
    }
    case Expr::DeclRefExprClass: {
      ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (Decl->hasAttr<WeakRefAttr>())
        return CGM.GetWeakRefReference(Decl);
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(FD);
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isBlockVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C =
          CGM.getObjCRuntime().GenerateConstantString(SL->getString());
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
      if (CGF) {
        LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
        return cast<llvm::Constant>(Res.getAddress());
      } else if (Type == PredefinedExpr::PrettyFunction) {
        return CGM.GetAddrOfConstantCString("top level", ".tmp");
      }

      return CGM.GetAddrOfConstantCString("", ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      llvm::Constant *Ptr =
        CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      unsigned builtin = CE->isBuiltinCall(CGM.getContext());
      if (builtin !=
            Builtin::BI__builtin___CFStringMakeConstantString &&
          builtin !=
            Builtin::BI__builtin___NSStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      if (builtin ==
            Builtin::BI__builtin___NSStringMakeConstantString) {
        return CGM.getObjCRuntime().GenerateConstantString(Literal);
      }
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};

}  // end anonymous namespace.

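// Try to emit an expression as a constant. The expression is first handed to
// the AST constant evaluator (Evaluate / EvaluateAsLValue); if that fails or
// the result has side effects, fall back to the syntactic ConstExprEmitter.
// i1 results are zero-extended to the memory representation of bool.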
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success && !Result.HasSideEffects) {
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
    case APValue::LValue: {
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                               Result.Val.getLValueOffset().getQuantity());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
          llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
          Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
          C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getBitCast(C, DestTy);

        return llvm::ConstantExpr::getPtrToInt(C, DestTy);
      } else {
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return llvm::ConstantExpr::getTrunc(C, DestTy);

        return C;
      }
    }
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      if (C->getType()->isIntegerTy(1)) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = llvm::ConstantExpr::getZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      // FIXME: the target may want to specify that this is packed.
      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
    }
    case APValue::Float:
      return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatReal());
      Complex[1] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatImag());

      // FIXME: the target may want to specify that this is packed.
      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
    }
    case APValue::Vector: {
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
      }
      return llvm::ConstantVector::get(&Inits[0], Inits.size());
    }
    }
  }

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType()->isIntegerTy(1)) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

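// Recursively walk a type that requires non-zero initialization and
// overwrite the i8 elements covering each pointer-to-data-member subobject
// with -1, the Itanium ABI's null representation. StartOffset is the bit
// offset of T within the byte array being filled in.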
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
                             std::vector<llvm::Constant *> &Elements,
                             uint64_t StartOffset) {
  assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");

  if (!CGM.getLangOptions().CPlusPlus ||
      !CGM.getCXXABI().RequiresNonZeroInitializer(T))
    return;

  if (const ConstantArrayType *CAT =
        CGM.getContext().getAsConstantArrayType(T)) {
    QualType ElementTy = CAT->getElementType();
    uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);

    for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
      FillInNullDataMemberPointers(CGM, ElementTy, Elements,
                                   StartOffset + I * ElementSize);
    }
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    // Go through all bases and fill in any null pointer to data members.
    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
         E = RD->bases_end(); I != E; ++I) {
      if (I->isVirtual()) {
        // FIXME: We should initialize null pointer to data members in virtual
        // bases here.
        continue;
      }

      const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (BaseDecl->isEmpty())
        continue;

      // Ignore bases that don't have any pointer to data members.
      if (!CGM.getCXXABI().RequiresNonZeroInitializer(BaseDecl))
        continue;

      uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
      FillInNullDataMemberPointers(CGM, I->getType(),
                                   Elements, StartOffset + BaseOffset);
    }

    // Visit all fields.
    unsigned FieldNo = 0;
    for (RecordDecl::field_iterator I = RD->field_begin(),
         E = RD->field_end(); I != E; ++I, ++FieldNo) {
      QualType FieldType = I->getType();

      if (!CGM.getCXXABI().RequiresNonZeroInitializer(FieldType))
        continue;

      uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
      FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
    }
  } else {
    assert(T->isMemberPointerType() && "Should only see member pointers here!");
    assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
           "Should only see pointers to data members here!");

    uint64_t StartIndex = StartOffset / 8;
    uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;

    llvm::Constant *NegativeOne =
      llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
                             -1ULL, /*isSigned=*/true);

    // Fill in the null data member pointer.
    for (uint64_t I = StartIndex; I != EndIndex; ++I)
      Elements[I] = NegativeOne;
  }
}

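// Emit the "null" value for a type. This is an all-zero constant unless the
// type contains pointers to data members, which the Itanium ABI represents
// as -1; in that case arrays, bases, and fields are filled in element by
// element.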
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  if (!getLangOptions().CPlusPlus ||
      !getCXXABI().RequiresNonZeroInitializer(T))
    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));

  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {

    QualType ElementTy = CAT->getElementType();

    llvm::Constant *Element = EmitNullConstant(ElementTy);
    unsigned NumElements = CAT->getSize().getZExtValue();
    std::vector<llvm::Constant *> Array(NumElements);
    for (unsigned i = 0; i != NumElements; ++i)
      Array[i] = Element;

    const llvm::ArrayType *ATy =
      cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
    return llvm::ConstantArray::get(ATy, Array);
  }

  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const llvm::StructType *STy =
      cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
    unsigned NumElements = STy->getNumElements();
    std::vector<llvm::Constant *> Elements(NumElements);

    const CGRecordLayout &Layout = getTypes().getCGRecordLayout(RD);

    // Go through all bases and fill in any null pointer to data members.
    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
         E = RD->bases_end(); I != E; ++I) {
      if (I->isVirtual()) {
        // FIXME: We should initialize null pointer to data members in virtual
        // bases here.
        continue;
      }

      const CXXRecordDecl *BaseDecl =
        cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (BaseDecl->isEmpty())
        continue;

      // Ignore bases that don't have any pointer to data members.
      if (!getCXXABI().RequiresNonZeroInitializer(BaseDecl))
        continue;

      // Currently, all bases are arrays of i8. Figure out how many elements
      // this base array has.
      unsigned BaseFieldNo = Layout.getNonVirtualBaseLLVMFieldNo(BaseDecl);
      const llvm::ArrayType *BaseArrayTy =
        cast<llvm::ArrayType>(STy->getElementType(BaseFieldNo));

      unsigned NumBaseElements = BaseArrayTy->getNumElements();
      std::vector<llvm::Constant *> BaseElements(NumBaseElements);

      // Now fill in null data member pointers.
      FillInNullDataMemberPointers(*this, I->getType(), BaseElements, 0);

      // Now go through all other elements and zero them out.
      if (NumBaseElements) {
        llvm::Constant *Zero =
          llvm::ConstantInt::get(llvm::Type::getInt8Ty(getLLVMContext()), 0);

        for (unsigned I = 0; I != NumBaseElements; ++I) {
          if (!BaseElements[I])
            BaseElements[I] = Zero;
        }
      }

      Elements[BaseFieldNo] = llvm::ConstantArray::get(BaseArrayTy,
                                                       BaseElements);
    }

    for (RecordDecl::field_iterator I = RD->field_begin(),
         E = RD->field_end(); I != E; ++I) {
      const FieldDecl *FD = *I;

      // Ignore bit fields.
      if (FD->isBitField())
        continue;

      unsigned FieldNo = Layout.getLLVMFieldNo(FD);
      Elements[FieldNo] = EmitNullConstant(FD->getType());
    }

    // Now go through all other fields and zero them out.
    for (unsigned i = 0; i != NumElements; ++i) {
      if (!Elements[i])
        Elements[i] = llvm::Constant::getNullValue(STy->getElementType(i));
    }

    return llvm::ConstantStruct::get(STy, Elements);
  }

  assert(T->isMemberPointerType() && "Should only see member pointers here!");
  assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
         "Should only see pointers to data members here!");

  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1ULL,
                                /*isSigned=*/true);
}

llvm::Constant *
CodeGenModule::EmitPointerToDataMember(const FieldDecl *FD) {

  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of the class
  //   object containing it, represented as a ptrdiff_t

  const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(FD->getParent());
  QualType ClassType =
    getContext().getTypeDeclType(const_cast<CXXRecordDecl *>(ClassDecl));

  const llvm::StructType *ClassLTy =
    cast<llvm::StructType>(getTypes().ConvertType(ClassType));

  const CGRecordLayout &RL =
    getTypes().getCGRecordLayout(FD->getParent());
  unsigned FieldNo = RL.getLLVMFieldNo(FD);
  uint64_t Offset =
    getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);

  const llvm::Type *PtrDiffTy =
    getTypes().ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset);
}
1165