//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                            ConstStructBuilder
//===----------------------------------------------------------------------===//

namespace {
class ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;
  unsigned NextFieldOffsetInBytes;
  unsigned LLVMStructAlignment;
  std::vector<llvm::Constant *> Elements;
public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     InitListExpr *ILE);

private:
  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
    LLVMStructAlignment(1) { }

  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   llvm::Constant *InitExpr);

  void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      llvm::ConstantInt *InitExpr);

  void AppendPadding(uint64_t NumBytes);

  void AppendTailPadding(uint64_t RecordSize);

  void ConvertStructToPacked();

  bool Build(InitListExpr *ILE);

  unsigned getAlignment(const llvm::Constant *C) const {
    if (Packed)  return 1;
    return CGM.getTargetData().getABITypeAlignment(C->getType());
  }

  uint64_t getSizeInBytes(const llvm::Constant *C) const {
    return CGM.getTargetData().getTypeAllocSize(C->getType());
  }
};

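// Append a non-bitfield member at the given bit offset from the start of the
// record.  If rounding the current offset up to the initializer's natural
// alignment would overshoot the field's offset, the in-progress struct is
// converted to a packed layout; if it would fall short, undef padding is
// appended first.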
bool ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
            llvm::Constant *InitCst) {
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
         && "Field offset mismatch!");

  unsigned FieldAlignment = getAlignment(InitCst);

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
    assert(!Packed && "Alignment is wrong even with a packed struct!");

    // Convert the struct to a packed struct.
    ConvertStructToPacked();

    AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // We need to append padding.
    AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);

    assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
           "Did not add enough padding!");

    AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
  }

  // Add the field.
  Elements.push_back(InitCst);
  NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes +
                             getSizeInBytes(InitCst);

  if (Packed)
    assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
  else
    LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);

  return true;
}

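// Append a bitfield member.  Bitfield storage is accumulated as a sequence of
// i8 elements: bits that share a byte with the previous element are OR'd into
// it, whole bytes are appended as individual i8 constants, and any final
// partial byte is shifted into place according to the target's endianness.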
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset,
                                        llvm::ConstantInt *CI) {
  if (FieldOffset > NextFieldOffsetInBytes * 8) {
    // We need to add padding.
    uint64_t NumBytes =
      llvm::RoundUpToAlignment(FieldOffset -
                               NextFieldOffsetInBytes * 8, 8) / 8;

    AppendPadding(NumBytes);
  }

  uint64_t FieldSize =
    Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();

  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (FieldSize > FieldValue.getBitWidth())
    FieldValue = FieldValue.zext(FieldSize);

  // Truncate the size of FieldValue to the bit field size.
  if (FieldSize < FieldValue.getBitWidth())
    FieldValue = FieldValue.trunc(FieldSize);

  if (FieldOffset < NextFieldOffsetInBytes * 8) {
    // Either part of the field or the entire field can go into the previous
    // byte.
    assert(!Elements.empty() && "Elements can't be empty!");

    unsigned BitsInPreviousByte =
      NextFieldOffsetInBytes * 8 - FieldOffset;

    bool FitsCompletelyInPreviousByte =
      BitsInPreviousByte >= FieldValue.getBitWidth();

    llvm::APInt Tmp = FieldValue;

    if (!FitsCompletelyInPreviousByte) {
      unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

      if (CGM.getTargetData().isBigEndian()) {
        Tmp = Tmp.lshr(NewFieldWidth);
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining high bits.
        FieldValue = FieldValue.trunc(NewFieldWidth);
      } else {
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining low bits.
        FieldValue = FieldValue.lshr(BitsInPreviousByte);
        FieldValue = FieldValue.trunc(NewFieldWidth);
      }
    }

    Tmp = Tmp.zext(8);
    if (CGM.getTargetData().isBigEndian()) {
      if (FitsCompletelyInPreviousByte)
        Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
    } else {
      Tmp = Tmp.shl(8 - BitsInPreviousByte);
    }

    // 'or' in the bits that go into the previous byte.
    llvm::Value *LastElt = Elements.back();
    if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
      Tmp |= Val->getValue();
    else {
      assert(isa<llvm::UndefValue>(LastElt));
      // If there is an undef field that we're adding to, it can either be a
      // scalar undef (in which case, we just replace it with our field) or it
      // is an array.  If it is an array, we have to pull one byte off the
      // array so that the other undef bytes stay around.
      if (!isa<llvm::IntegerType>(LastElt->getType())) {
        // The undef padding will be a multibyte array; create a new smaller
        // padding and then a hole for our i8 to get plopped into.
        assert(isa<llvm::ArrayType>(LastElt->getType()) &&
               "Expected array padding of undefs");
        const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
        assert(AT->getElementType()->isIntegerTy(8) &&
               AT->getNumElements() != 0 &&
               "Expected non-empty array padding of undefs");

        // Remove the padding array.
        NextFieldOffsetInBytes -= AT->getNumElements();
        Elements.pop_back();

        // Add the padding back in two chunks.
        AppendPadding(AT->getNumElements()-1);
        AppendPadding(1);
        assert(isa<llvm::UndefValue>(Elements.back()) &&
               Elements.back()->getType()->isIntegerTy(8) &&
               "Padding addition didn't work right");
      }
    }

    Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

    if (FitsCompletelyInPreviousByte)
      return;
  }

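  // Append any remaining whole bytes of the value, one i8 element per byte.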
  while (FieldValue.getBitWidth() > 8) {
    llvm::APInt Tmp;

    if (CGM.getTargetData().isBigEndian()) {
      // We want the high bits.
      Tmp = FieldValue.lshr(FieldValue.getBitWidth() - 8).trunc(8);
    } else {
      // We want the low bits.
      Tmp = FieldValue.trunc(8);

      FieldValue = FieldValue.lshr(8);
    }

    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
    NextFieldOffsetInBytes++;

    FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - 8);
  }

  assert(FieldValue.getBitWidth() > 0 &&
         "Should have at least one bit left!");
  assert(FieldValue.getBitWidth() <= 8 &&
         "Should not have more than a byte left!");

  if (FieldValue.getBitWidth() < 8) {
    if (CGM.getTargetData().isBigEndian()) {
      unsigned BitWidth = FieldValue.getBitWidth();

      FieldValue = FieldValue.zext(8) << (8 - BitWidth);
    } else
      FieldValue = FieldValue.zext(8);
  }

  // Append the last element.
  Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                            FieldValue));
  NextFieldOffsetInBytes++;
}

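// Append NumBytes of padding, represented as a single undef i8 or an undef
// [NumBytes x i8] array element.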
void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
  if (!NumBytes)
    return;

  const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  llvm::Constant *C = llvm::UndefValue::get(Ty);
  Elements.push_back(C);
  assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");

  NextFieldOffsetInBytes += getSizeInBytes(C);
}

void ConstStructBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendPadding(NumPadBytes);
}

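// Re-lay out the elements accumulated so far for a packed struct, inserting
// explicit undef byte padding wherever the ABI alignment of an element would
// otherwise have supplied implicit padding.  Element offsets and the total
// size are preserved; only the representation changes.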
void ConstStructBuilder::ConvertStructToPacked() {
  std::vector<llvm::Constant *> PackedElements;
  uint64_t ElementOffsetInBytes = 0;

  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    llvm::Constant *C = Elements[i];

    unsigned ElementAlign =
      CGM.getTargetData().getABITypeAlignment(C->getType());
    uint64_t AlignedElementOffsetInBytes =
      llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);

    if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
      // We need some padding.
      uint64_t NumBytes =
        AlignedElementOffsetInBytes - ElementOffsetInBytes;

      const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
      if (NumBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumBytes);

      llvm::Constant *Padding = llvm::UndefValue::get(Ty);
      PackedElements.push_back(Padding);
      ElementOffsetInBytes += getSizeInBytes(Padding);
    }

    PackedElements.push_back(C);
    ElementOffsetInBytes += getSizeInBytes(C);
  }

  assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
         "Packing the struct changed its size!");

  Elements = PackedElements;
  LLVMStructAlignment = 1;
  Packed = true;
}

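// Build the constant for a struct or union initializer list by walking the
// record's fields in declaration order, emitting each explicit initializer
// (or a null constant for trailing fields without one) and appending it at
// the offset given by the AST record layout.  Returns false if any
// initializer could not be emitted as a constant.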
bool ConstStructBuilder::Build(InitListExpr *ILE) {
  RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  unsigned FieldNo = 0;
  unsigned ElementNo = 0;
  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {

    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
      continue;

    // Don't emit anonymous bitfields, they just affect layout.
    if (Field->isBitField() && !Field->getIdentifier())
      continue;

    // Get the initializer.  A struct can include fields without initializers,
    // we just use explicit null values for them.
    llvm::Constant *EltInit;
    if (ElementNo < ILE->getNumInits())
      EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
                                     Field->getType(), CGF);
    else
      EltInit = CGM.EmitNullConstant(Field->getType());

    if (!EltInit)
      return false;

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
        return false;
    } else {
      // Otherwise we have a bitfield.
      AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
                     cast<llvm::ConstantInt>(EltInit));
    }
  }

  uint64_t LayoutSizeInBytes = Layout.getSize().getQuantity();

  if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
    // If the struct is bigger than the size of the record type,
    // we must have a flexible array member at the end.
    assert(RD->hasFlexibleArrayMember() &&
           "Must have flexible array member if struct is bigger than type!");

    // No tail padding is necessary.
    return true;
  }

  uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
                                                      LLVMStructAlignment);

  // Check if we need to convert the struct to a packed struct.
  if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
      LLVMSizeInBytes > LayoutSizeInBytes) {
    assert(!Packed && "Size mismatch!");

    ConvertStructToPacked();
    assert(NextFieldOffsetInBytes <= LayoutSizeInBytes &&
           "Converting to packed did not help!");
  }

  // Append tail padding if necessary.
  AppendTailPadding(CGM.getContext().toBits(Layout.getSize()));

  assert(Layout.getSize().getQuantity() == NextFieldOffsetInBytes &&
         "Tail padding mismatch!");

  return true;
}

llvm::Constant *ConstStructBuilder::
  BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
  ConstStructBuilder Builder(CGM, CGF);

  if (!Builder.Build(ILE))
    return 0;

  llvm::Constant *Result =
  llvm::ConstantStruct::get(CGM.getLLVMContext(),
                            Builder.Elements, Builder.Packed);

  assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
                                  Builder.getAlignment(Result)) ==
         Builder.getSizeInBytes(Result) && "Size mismatch!");

  return Result;
}


//===----------------------------------------------------------------------===//
//                             ConstExprEmitter
//===----------------------------------------------------------------------===//

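// ConstExprEmitter folds expressions into LLVM constants by walking the AST
// directly.  CodeGenModule::EmitConstantExpr uses it both as a fallback when
// the AST-level evaluator fails and, via EmitLValue, to emit the base of an
// evaluated lvalue.  Each Visit method returns 0 if the expression cannot be
// emitted as a constant.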
class ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
    if (E->getType()->isMemberPointerType())
      return CGM.getMemberPointerConstant(E);

    return 0;
  }

  llvm::Constant *VisitBinSub(BinaryOperator *E) {
    // This must be a pointer/pointer subtraction.  This only happens for
    // address of label.
    if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
       !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
      return 0;

    llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
                                               E->getLHS()->getType(), CGF);
    llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
                                               E->getRHS()->getType(), CGF);

    const llvm::Type *ResultType = ConvertType(E->getType());
    LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
    RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);

    // No need to divide by element size, since addr of label is always void*,
    // which has size 1 in GNUish.
    return llvm::ConstantExpr::getSub(LHS, RHS);
  }

  llvm::Constant *VisitCastExpr(CastExpr* E) {
    switch (E->getCastKind()) {
    case CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();

      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C)
        return 0;

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size
      std::vector<llvm::Constant*> Elts;
      std::vector<const llvm::Type*> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::UndefValue::get(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy =
        llvm::StructType::get(C->getType()->getContext(), Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }
    case CK_NullToMemberPointer: {
      const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
      return CGM.getCXXABI().EmitNullMemberPointer(MPT);
    }

    case CK_BaseToDerivedMemberPointer: {
      Expr *SubExpr = E->getSubExpr();
      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C) return 0;

      return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
    }

    case CK_BitCast:
      // This must be a member function pointer cast.
      return Visit(E->getSubExpr());

    default: {
      // FIXME: This should be handled by the CK_NoOp cast kind.
      // Explicit and implicit no-op casts
      QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
      if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
        return Visit(E->getSubExpr());

      // Handle integer->integer casts for address-of-label differences.
      if (Ty->isIntegerType() && SubTy->isIntegerType() &&
          CGF) {
        llvm::Value *Src = Visit(E->getSubExpr());
        if (Src == 0) return 0;

        // Use EmitScalarConversion to perform the conversion.
        return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
      }

      return 0;
    }
    }
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    unsigned NumInitElements = ILE->getNumInits();
    if (NumInitElements == 1 &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))))
      return Visit(ILE->getInit(0));

    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
                                                            Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isRecordType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    // If ILE was a constant vector, we would have handled it already.
    if (ILE->getType()->isVectorType())
      return 0;

    assert(0 && "Unable to handle InitListExpr");
    // Get rid of control reaches end of void function warning.
    // Not reached.
    return 0;
  }

  llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
    if (!E->getConstructor()->isTrivial())
      return 0;

    QualType Ty = E->getType();

    // FIXME: We should not have to call getBaseElementType here.
    const RecordType *RT =
      CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    // If the class doesn't have a trivial destructor, we can't emit it as a
    // constant expr.
    if (!RD->hasTrivialDestructor())
      return 0;

    // Only copy and default constructors can be trivial.


    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyConstructor() &&
             "trivial ctor has argument but isn't a copy ctor");

      Expr *Arg = E->getArg(0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      return Visit(Arg);
    }

    return CGM.EmitNullConstant(Ty);
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return llvm::ConstantArray::get(VMContext,
                                    CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantArray::get(VMContext, Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
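  // Emit the constant address of an lvalue expression: compound literals,
  // global declarations, string and @encode literals, labels, blocks, and the
  // CFString/NSString builtins.  Returns 0 if no constant address can be
  // formed.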
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstant(CGM.getContext()),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral", 0, false,
                                     E->getType().getAddressSpace());
      return C;
    }
    case Expr::DeclRefExprClass: {
      ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (Decl->hasAttr<WeakRefAttr>())
        return CGM.GetWeakRefReference(Decl);
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(FD);
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isLocalVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C =
          CGM.getObjCRuntime().GenerateConstantString(SL->getString());
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
      if (CGF) {
        LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
        return cast<llvm::Constant>(Res.getAddress());
      } else if (Type == PredefinedExpr::PrettyFunction) {
        return CGM.GetAddrOfConstantCString("top level", ".tmp");
      }

      return CGM.GetAddrOfConstantCString("", ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      llvm::Constant *Ptr =
        CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      unsigned builtin = CE->isBuiltinCall(CGM.getContext());
      if (builtin !=
            Builtin::BI__builtin___CFStringMakeConstantString &&
          builtin !=
            Builtin::BI__builtin___NSStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      if (builtin ==
            Builtin::BI__builtin___NSStringMakeConstantString) {
        return CGM.getObjCRuntime().GenerateConstantString(Literal);
      }
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};

}  // end anonymous namespace.

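/// EmitConstantExpr - Try to emit E as a constant of type DestType.  The
/// expression is first run through the AST constant evaluator; if that yields
/// a usable APValue without side effects, it is converted directly to an LLVM
/// constant.  Otherwise the ConstExprEmitter visitor above is used as a
/// fallback.  Returns 0 if the expression cannot be emitted as a constant.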
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success && !Result.HasSideEffects) {
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
    case APValue::LValue: {
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                               Result.Val.getLValueOffset().getQuantity());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
          llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
          Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
          C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getBitCast(C, DestTy);

        return llvm::ConstantExpr::getPtrToInt(C, DestTy);
      } else {
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return llvm::ConstantExpr::getTrunc(C, DestTy);

        return C;
      }
    }
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      if (C->getType()->isIntegerTy(1)) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = llvm::ConstantExpr::getZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      // FIXME: the target may want to specify that this is packed.
      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
    }
    case APValue::Float:
      return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatReal());
      Complex[1] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatImag());

      // FIXME: the target may want to specify that this is packed.
      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
    }
    case APValue::Vector: {
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
      }
      return llvm::ConstantVector::get(Inits);
    }
    }
  }

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType()->isIntegerTy(1)) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

static uint64_t getFieldOffset(ASTContext &C, const FieldDecl *field) {
  const ASTRecordLayout &layout = C.getASTRecordLayout(field->getParent());
  return layout.getFieldOffset(field->getFieldIndex());
}

llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
  // Member pointer constants always have a very particular form.
  const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
  const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();

  // A member function pointer.
  if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
    return getCXXABI().EmitMemberPointer(method);

  // Otherwise, a member data pointer.
  uint64_t fieldOffset;
  if (const FieldDecl *field = dyn_cast<FieldDecl>(decl))
    fieldOffset = getFieldOffset(getContext(), field);
  else {
    const IndirectFieldDecl *ifield = cast<IndirectFieldDecl>(decl);

    fieldOffset = 0;
    for (IndirectFieldDecl::chain_iterator ci = ifield->chain_begin(),
           ce = ifield->chain_end(); ci != ce; ++ci)
      fieldOffset += getFieldOffset(getContext(), cast<FieldDecl>(*ci));
  }

  CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
  return getCXXABI().EmitMemberDataPointer(type, chars);
}

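// Fill in the elements of Elements (an array of i8 constants) that correspond
// to pointer-to-data-member subobjects of T, whose null representation is
// all-ones rather than zero (see the Itanium member pointer FIXME below).
// StartOffset is the bit offset of T within the object and must be
// byte-aligned.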
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
                             std::vector<llvm::Constant *> &Elements,
                             uint64_t StartOffset) {
  assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");

  if (CGM.getTypes().isZeroInitializable(T))
    return;

  if (const ConstantArrayType *CAT =
        CGM.getContext().getAsConstantArrayType(T)) {
    QualType ElementTy = CAT->getElementType();
    uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);

    for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
      FillInNullDataMemberPointers(CGM, ElementTy, Elements,
                                   StartOffset + I * ElementSize);
    }
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    // Go through all bases and fill in any null pointer to data members.
    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
         E = RD->bases_end(); I != E; ++I) {
      if (I->isVirtual()) {
        // Ignore virtual bases.
        continue;
      }

      const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (BaseDecl->isEmpty())
        continue;

      // Ignore bases that don't have any pointer to data members.
      if (CGM.getTypes().isZeroInitializable(BaseDecl))
        continue;

      uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
      FillInNullDataMemberPointers(CGM, I->getType(),
                                   Elements, StartOffset + BaseOffset);
    }

    // Visit all fields.
    unsigned FieldNo = 0;
    for (RecordDecl::field_iterator I = RD->field_begin(),
         E = RD->field_end(); I != E; ++I, ++FieldNo) {
      QualType FieldType = I->getType();

      if (CGM.getTypes().isZeroInitializable(FieldType))
        continue;

      uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
      FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
    }
  } else {
    assert(T->isMemberPointerType() && "Should only see member pointers here!");
    assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
           "Should only see pointers to data members here!");

    uint64_t StartIndex = StartOffset / 8;
    uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;

    // FIXME: hardcodes Itanium member pointer representation!
    llvm::Constant *NegativeOne =
      llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
                             -1ULL, /*isSigned*/true);

    // Fill in the null data member pointer.
    for (uint64_t I = StartIndex; I != EndIndex; ++I)
      Elements[I] = NegativeOne;
  }
}

static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               const llvm::Type *baseType,
                                               const CXXRecordDecl *base);

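// Emit the "null" constant for a C++ record that cannot simply be
// zero-initialized, i.e. one that contains pointers to data members somewhere
// within it.  When asCompleteObject is true the complete-object LLVM type
// (including virtual bases) is filled in; otherwise only the base-subobject
// type is used.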
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
                                        const CXXRecordDecl *record,
                                        bool asCompleteObject) {
  const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
  const llvm::StructType *structure =
    (asCompleteObject ? layout.getLLVMType()
                      : layout.getBaseSubobjectLLVMType());

  unsigned numElements = structure->getNumElements();
  std::vector<llvm::Constant *> elements(numElements);

  // Fill in all the bases.
  for (CXXRecordDecl::base_class_const_iterator
         I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
    if (I->isVirtual()) {
      // Ignore virtual bases; if we're laying out for a complete
      // object, we'll lay these out later.
      continue;
    }

    const CXXRecordDecl *base =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    // Ignore empty bases.
    if (base->isEmpty())
      continue;

    unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
    const llvm::Type *baseType = structure->getElementType(fieldIndex);
    elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
  }

  // Fill in all the fields.
  for (RecordDecl::field_iterator I = record->field_begin(),
         E = record->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;

    // Ignore bit fields.
    if (field->isBitField())
      continue;

    unsigned fieldIndex = layout.getLLVMFieldNo(field);
    elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
  }

  // Fill in the virtual bases, if we're working with the complete object.
  if (asCompleteObject) {
    for (CXXRecordDecl::base_class_const_iterator
           I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
      const CXXRecordDecl *base =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (base->isEmpty())
        continue;

      unsigned fieldIndex = layout.getVirtualBaseIndex(base);

      // We might have already laid this field out.
      if (elements[fieldIndex]) continue;

      const llvm::Type *baseType = structure->getElementType(fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Now go through all other fields and zero them out.
  for (unsigned i = 0; i != numElements; ++i) {
    if (!elements[i])
      elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
  }

  return llvm::ConstantStruct::get(structure, elements);
}

/// Emit the null constant for a base subobject.
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               const llvm::Type *baseType,
                                               const CXXRecordDecl *base) {
  const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);

  // Just zero out bases that don't have any pointer to data members.
  if (baseLayout.isZeroInitializableAsBase())
    return llvm::Constant::getNullValue(baseType);

  // If the base type is a struct, we can just use its null constant.
  if (isa<llvm::StructType>(baseType)) {
    return EmitNullConstant(CGM, base, /*complete*/ false);
  }

  // Otherwise, some bases are represented as arrays of i8 if the size
  // of the base is smaller than its corresponding LLVM type.  Figure
  // out how many elements this base array has.
  const llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
  unsigned numBaseElements = baseArrayType->getNumElements();

  // Fill in null data member pointers.
  std::vector<llvm::Constant *> baseElements(numBaseElements);
  FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
                               baseElements, 0);

  // Now go through all other elements and zero them out.
  if (numBaseElements) {
    const llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
    llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
    for (unsigned i = 0; i != numBaseElements; ++i) {
      if (!baseElements[i])
        baseElements[i] = i8_zero;
    }
  }

  return llvm::ConstantArray::get(baseArrayType, baseElements);
}

llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  if (getTypes().isZeroInitializable(T))
    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));

  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {

    QualType ElementTy = CAT->getElementType();

    llvm::Constant *Element = EmitNullConstant(ElementTy);
    unsigned NumElements = CAT->getSize().getZExtValue();
    std::vector<llvm::Constant *> Array(NumElements);
    for (unsigned i = 0; i != NumElements; ++i)
      Array[i] = Element;

    const llvm::ArrayType *ATy =
      cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
    return llvm::ConstantArray::get(ATy, Array);
  }

  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return ::EmitNullConstant(*this, RD, /*complete object*/ true);
  }

  assert(T->isMemberPointerType() && "Should only see member pointers here!");
  assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
         "Should only see pointers to data members here!");

  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}
1189