CGExprConstant.cpp revision 2c12d0319a267b844cb7d569d84426cd344b90f7
//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace  {
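// ConstStructBuilder - Incrementally builds the constant initializer for a
// struct or union InitListExpr. It tracks the next byte offset and the
// alignment of the LLVM struct built so far, and falls back to a packed
// layout when a field cannot otherwise be placed at its required offset.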
class ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;

  unsigned NextFieldOffsetInBytes;

  unsigned LLVMStructAlignment;

  std::vector<llvm::Constant *> Elements;

  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
    LLVMStructAlignment(1) { }

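  // AppendField - Emit the initializer for a non-bitfield member that starts
  // at the given bit offset, inserting padding (or converting the whole
  // struct to a packed layout) so the constant lands at the right byte.
  // Returns false if the initializer cannot be emitted as a constant.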
  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   const Expr *InitExpr) {
    uint64_t FieldOffsetInBytes = FieldOffset / 8;

    assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
           && "Field offset mismatch!");

    // Emit the field.
    llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
    if (!C)
      return false;

    unsigned FieldAlignment = getAlignment(C);

    // Round up the field offset to the alignment of the field type.
    uint64_t AlignedNextFieldOffsetInBytes =
      llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

    if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
      assert(!Packed && "Alignment is wrong even with a packed struct!");

      // Convert the struct to a packed struct.
      ConvertStructToPacked();

      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
      // We need to append padding.
      AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);

      assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
             "Did not add enough padding!");

      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    // Add the field.
    Elements.push_back(C);
    NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);

    if (Packed)
      assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
    else
      LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);

    return true;
  }

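  // AppendBitField - Emit the initializer for a bit-field member. The value
  // is emitted a byte at a time: bits that share a byte with the previous
  // element are OR'd into it, and the remainder is appended as i8 constants,
  // honoring the target's endianness.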
  bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      const Expr *InitExpr) {
    llvm::ConstantInt *CI =
      cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
                                                           Field->getType(),
                                                           CGF));
    // FIXME: Can this ever happen?
    if (!CI)
      return false;

    if (FieldOffset > NextFieldOffsetInBytes * 8) {
      // We need to add padding.
      uint64_t NumBytes =
        llvm::RoundUpToAlignment(FieldOffset -
                                 NextFieldOffsetInBytes * 8, 8) / 8;

      AppendPadding(NumBytes);
    }

    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();

    llvm::APInt FieldValue = CI->getValue();

    // Promote the size of FieldValue if necessary
    // FIXME: This should never occur, but currently it can because initializer
    // constants are cast to bool, and because clang is not enforcing bitfield
    // width limits.
    if (FieldSize > FieldValue.getBitWidth())
      FieldValue.zext(FieldSize);

    // Truncate the size of FieldValue to the bit field size.
    if (FieldSize < FieldValue.getBitWidth())
      FieldValue.trunc(FieldSize);

    if (FieldOffset < NextFieldOffsetInBytes * 8) {
      // Either part of the field or the entire field can go into the previous
      // byte.
      assert(!Elements.empty() && "Elements can't be empty!");

      unsigned BitsInPreviousByte =
        NextFieldOffsetInBytes * 8 - FieldOffset;

      bool FitsCompletelyInPreviousByte =
        BitsInPreviousByte >= FieldValue.getBitWidth();

      llvm::APInt Tmp = FieldValue;

      if (!FitsCompletelyInPreviousByte) {
        unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

        if (CGM.getTargetData().isBigEndian()) {
          Tmp = Tmp.lshr(NewFieldWidth);
          Tmp.trunc(BitsInPreviousByte);

          // We want the remaining high bits.
          FieldValue.trunc(NewFieldWidth);
        } else {
          Tmp.trunc(BitsInPreviousByte);

          // We want the remaining low bits.
          FieldValue = FieldValue.lshr(BitsInPreviousByte);
          FieldValue.trunc(NewFieldWidth);
        }
      }

      Tmp.zext(8);
      if (CGM.getTargetData().isBigEndian()) {
        if (FitsCompletelyInPreviousByte)
          Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
      } else {
        Tmp = Tmp.shl(8 - BitsInPreviousByte);
      }

      // Or in the bits that go into the previous byte.
      if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(Elements.back()))
        Tmp |= Val->getValue();
      else
        assert(isa<llvm::UndefValue>(Elements.back()));

      Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

      if (FitsCompletelyInPreviousByte)
        return true;
    }

    while (FieldValue.getBitWidth() > 8) {
      llvm::APInt Tmp;

      if (CGM.getTargetData().isBigEndian()) {
        // We want the high bits.
        Tmp = FieldValue;
        Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
        Tmp.trunc(8);
      } else {
        // We want the low bits.
        Tmp = FieldValue;
        Tmp.trunc(8);

        FieldValue = FieldValue.lshr(8);
      }

      Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
      NextFieldOffsetInBytes++;

      FieldValue.trunc(FieldValue.getBitWidth() - 8);
    }

    assert(FieldValue.getBitWidth() > 0 &&
           "Should have at least one bit left!");
    assert(FieldValue.getBitWidth() <= 8 &&
           "Should not have more than a byte left!");

    if (FieldValue.getBitWidth() < 8) {
      if (CGM.getTargetData().isBigEndian()) {
        unsigned BitWidth = FieldValue.getBitWidth();

        FieldValue.zext(8);
        FieldValue = FieldValue << (8 - BitWidth);
      } else
        FieldValue.zext(8);
    }

    // Append the last element.
    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                              FieldValue));
    NextFieldOffsetInBytes++;
    return true;
  }

  void AppendPadding(uint64_t NumBytes) {
    if (!NumBytes)
      return;

    const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
    if (NumBytes > 1)
      Ty = llvm::ArrayType::get(Ty, NumBytes);

    llvm::Constant *C = llvm::UndefValue::get(Ty);
    Elements.push_back(C);
    assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");

    NextFieldOffsetInBytes += getSizeInBytes(C);
  }

  void AppendTailPadding(uint64_t RecordSize) {
    assert(RecordSize % 8 == 0 && "Invalid record size!");

    uint64_t RecordSizeInBytes = RecordSize / 8;
    assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

    unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
    AppendPadding(NumPadBytes);
  }

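  // ConvertStructToPacked - Re-emit the elements gathered so far with explicit
  // i8 padding between them, so the final constant can be created with the
  // packed flag while every element stays at its current byte offset.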
  void ConvertStructToPacked() {
    std::vector<llvm::Constant *> PackedElements;
    uint64_t ElementOffsetInBytes = 0;

    for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
      llvm::Constant *C = Elements[i];

      unsigned ElementAlign =
        CGM.getTargetData().getABITypeAlignment(C->getType());
      uint64_t AlignedElementOffsetInBytes =
        llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);

      if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
        // We need some padding.
        uint64_t NumBytes =
          AlignedElementOffsetInBytes - ElementOffsetInBytes;

        const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
        if (NumBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumBytes);

        llvm::Constant *Padding = llvm::UndefValue::get(Ty);
        PackedElements.push_back(Padding);
        ElementOffsetInBytes += getSizeInBytes(Padding);
      }

      PackedElements.push_back(C);
      ElementOffsetInBytes += getSizeInBytes(C);
    }

    assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
           "Packing the struct changed its size!");

    Elements = PackedElements;
    LLVMStructAlignment = 1;
    Packed = true;
  }

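  // Build - Walk the record's fields in declaration order, pairing each with
  // the next initializer from the InitListExpr. Unnamed bit-fields and
  // non-active union members are skipped, and tail padding is appended so the
  // constant matches the record's layout size (unless the record ends in a
  // flexible array member, which may make the constant larger than the type).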
  bool Build(InitListExpr *ILE) {
    RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    unsigned FieldNo = 0;
    unsigned ElementNo = 0;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
         FieldEnd = RD->field_end();
         ElementNo < ILE->getNumInits() && Field != FieldEnd;
         ++Field, ++FieldNo) {
      if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
        continue;

      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;

        if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
                            ILE->getInit(ElementNo)))
          return false;
      } else {
        if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
                         ILE->getInit(ElementNo)))
          return false;
      }

      ElementNo++;
    }

    uint64_t LayoutSizeInBytes = Layout.getSize() / 8;

    if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
      // If the struct is bigger than the size of the record type,
      // we must have a flexible array member at the end.
      assert(RD->hasFlexibleArrayMember() &&
             "Must have flexible array member if struct is bigger than type!");

      // No tail padding is necessary.
      return true;
    }

    uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
                                                        LLVMStructAlignment);

    // Check if we need to convert the struct to a packed struct.
    if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
        LLVMSizeInBytes > LayoutSizeInBytes) {
      assert(!Packed && "Size mismatch!");

      ConvertStructToPacked();
      assert(NextFieldOffsetInBytes == LayoutSizeInBytes &&
             "Converting to packed did not help!");
    }

    // Append tail padding if necessary.
    AppendTailPadding(Layout.getSize());

    assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
           "Tail padding mismatch!");

    return true;
  }

  unsigned getAlignment(const llvm::Constant *C) const {
    if (Packed)
      return 1;

    return CGM.getTargetData().getABITypeAlignment(C->getType());
  }

  uint64_t getSizeInBytes(const llvm::Constant *C) const {
    return CGM.getTargetData().getTypeAllocSize(C->getType());
  }

public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     InitListExpr *ILE) {
    ConstStructBuilder Builder(CGM, CGF);

    if (!Builder.Build(ILE))
      return 0;

    llvm::Constant *Result =
      llvm::ConstantStruct::get(CGM.getLLVMContext(),
                                Builder.Elements, Builder.Packed);

    assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
                                    Builder.getAlignment(Result)) ==
           Builder.getSizeInBytes(Result) && "Size mismatch!");

    return Result;
  }
};

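// ConstExprEmitter - StmtVisitor that folds an expression tree to a single
// llvm::Constant. Every visitor returns null when the (sub)expression cannot
// be emitted as a constant, and callers are expected to bail out in that case.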
class ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

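  // EmitMemberFunctionPointer - Build the Itanium C++ ABI representation of
  // &Class::method: a pair of ptrdiff_t values { ptr, adj }. For a
  // non-virtual method the first word holds the function address; for a
  // virtual one it holds 1 plus the vtable offset in bytes, so the low bit
  // distinguishes the two cases at call time.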
  llvm::Constant *EmitMemberFunctionPointer(CXXMethodDecl *MD) {
    assert(MD->isInstance() && "Member function must not be static!");

    MD = MD->getCanonicalDecl();

    const llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

    llvm::Constant *Values[2];

    // Get the function pointer (or index if this is a virtual function).
    if (MD->isVirtual()) {
      uint64_t Index = CGM.getVtableInfo().getMethodVtableIndex(MD);

      // The pointer is 1 + the virtual table offset in bytes.
      Values[0] = llvm::ConstantInt::get(PtrDiffTy, (Index * 8) + 1);
    } else {
      llvm::Constant *FuncPtr = CGM.GetAddrOfFunction(MD);

      Values[0] = llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
    }

    // The adjustment will always be 0.
    Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0);

    return llvm::ConstantStruct::get(CGM.getLLVMContext(),
                                     Values, 2, /*Packed=*/false);
  }

  llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
    if (const MemberPointerType *MPT =
        E->getType()->getAs<MemberPointerType>()) {
      QualType T = MPT->getPointeeType();
      DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());

      NamedDecl *ND = DRE->getDecl();
      if (T->isFunctionProtoType())
        return EmitMemberFunctionPointer(cast<CXXMethodDecl>(ND));

      // We have a pointer to data member.
      return CGM.EmitPointerToDataMember(cast<FieldDecl>(ND));
    }

    return 0;
  }

  llvm::Constant *VisitBinSub(BinaryOperator *E) {
    // This must be a pointer/pointer subtraction.  This only happens for
    // address of label.
    if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
       !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
      return 0;

    llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
                                               E->getLHS()->getType(), CGF);
    llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
                                               E->getRHS()->getType(), CGF);

    const llvm::Type *ResultType = ConvertType(E->getType());
    LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
    RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);

    // No need to divide by element size, since addr of label is always void*,
    // which has size 1 in GNUish.
    return llvm::ConstantExpr::getSub(LHS, RHS);
  }

  llvm::Constant *VisitCastExpr(CastExpr* E) {
    switch (E->getCastKind()) {
    case CastExpr::CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();

      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C)
        return 0;

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size
      std::vector<llvm::Constant*> Elts;
      std::vector<const llvm::Type*> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::UndefValue::get(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy =
        llvm::StructType::get(C->getType()->getContext(), Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }
    case CastExpr::CK_NullToMemberPointer:
      return CGM.EmitNullConstant(E->getType());

    case CastExpr::CK_BaseToDerivedMemberPointer: {
      Expr *SubExpr = E->getSubExpr();

      const MemberPointerType *SrcTy =
        SubExpr->getType()->getAs<MemberPointerType>();
      const MemberPointerType *DestTy =
        E->getType()->getAs<MemberPointerType>();

      const CXXRecordDecl *BaseClass =
        cast<CXXRecordDecl>(cast<RecordType>(SrcTy->getClass())->getDecl());
      const CXXRecordDecl *DerivedClass =
        cast<CXXRecordDecl>(cast<RecordType>(DestTy->getClass())->getDecl());

      if (SrcTy->getPointeeType()->isFunctionProtoType()) {
        llvm::Constant *C =
          CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
        if (!C)
          return 0;

        llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);

        // Check if we need to update the adjustment.
        if (llvm::Constant *Offset =
              CGM.GetNonVirtualBaseClassOffset(DerivedClass, BaseClass)) {
          llvm::Constant *Values[2];

          Values[0] = CS->getOperand(0);
          Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
          return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
                                           /*Packed=*/false);
        }

        return CS;
      }
    }

    case CastExpr::CK_BitCast:
      // This must be a member function pointer cast.
      return Visit(E->getSubExpr());

    default: {
      // FIXME: This should be handled by the CK_NoOp cast kind.
      // Explicit and implicit no-op casts
      QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
      if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
        return Visit(E->getSubExpr());

      // Handle integer->integer casts for address-of-label differences.
      if (Ty->isIntegerType() && SubTy->isIntegerType() &&
          CGF) {
        llvm::Value *Src = Visit(E->getSubExpr());
        if (Src == 0) return 0;

        // Use EmitScalarConversion to perform the conversion.
        return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
      }

      return 0;
    }
    }
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

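  // EmitArrayInitialization - Emit the elements of an array initializer,
  // null-filling any trailing elements that were not written explicitly. A
  // leading string or @encode literal initializing a char array is emitted
  // directly. If the emitted elements do not all match the array's element
  // type, the result is wrapped in an anonymous packed struct instead of a
  // ConstantArray.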
  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    unsigned NumInitElements = ILE->getNumInits();
    // FIXME: Check for wide strings
    // FIXME: Check for NumInitElements exactly equal to 1??
    if (NumInitElements > 0 &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
        ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
      return Visit(ILE->getInit(0));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
                                                            Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isRecordType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    // If ILE was a constant vector, we would have handled it already.
    if (ILE->getType()->isVectorType())
      return 0;

    assert(0 && "Unable to handle InitListExpr");
    // Get rid of control reaches end of void function warning.
    // Not reached.
    return 0;
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return llvm::ConstantArray::get(VMContext,
                                    CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantArray::get(VMContext, Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
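  // EmitLValue - Emit the address of an lvalue expression as a constant, for
  // the cases where that is possible: compound literals (which are promoted
  // to internal globals here), functions and global or static variables,
  // string and @encode literals, __func__-style predefined names,
  // address-of-label, the constant-string builtins, and global block
  // literals. Returns null for anything else.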
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstant(CGM.getContext()),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral", 0, false,
                                     E->getType().getAddressSpace());
      return C;
    }
    case Expr::DeclRefExprClass: {
      NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(FD);
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isBlockVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C =
          CGM.getObjCRuntime().GenerateConstantString(SL->getString());
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
      if (CGF) {
        LValue Res = CGF->EmitPredefinedFunctionName(Type);
        return cast<llvm::Constant>(Res.getAddress());
      } else if (Type == PredefinedExpr::PrettyFunction) {
        return CGM.GetAddrOfConstantCString("top level", ".tmp");
      }

      return CGM.GetAddrOfConstantCString("", ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      llvm::Constant *Ptr =
        CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      unsigned builtin = CE->isBuiltinCall(CGM.getContext());
      if (builtin !=
            Builtin::BI__builtin___CFStringMakeConstantString &&
          builtin !=
            Builtin::BI__builtin___NSStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      if (builtin ==
            Builtin::BI__builtin___NSStringMakeConstantString) {
        return CGM.getObjCRuntime().GenerateConstantString(Literal);
      }
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};

}  // end anonymous namespace.

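// EmitConstantExpr - Try the AST-level constant evaluator first; if it
// produces a usable APValue without side effects, translate that result
// directly to an llvm::Constant. Otherwise fall back to the syntactic
// ConstExprEmitter above. In both paths, i1 results are zero-extended to the
// type used for bools in memory.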
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success && !Result.HasSideEffects) {
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
    case APValue::LValue: {
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                               Result.Val.getLValueOffset().getQuantity());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
          llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
          Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
          C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getBitCast(C, DestTy);

        return llvm::ConstantExpr::getPtrToInt(C, DestTy);
      } else {
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return llvm::ConstantExpr::getTrunc(C, DestTy);

        return C;
      }
    }
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      if (C->getType() == llvm::Type::getInt1Ty(VMContext)) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = llvm::ConstantExpr::getZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      // FIXME: the target may want to specify that this is packed.
      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
    }
    case APValue::Float:
      return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatReal());
      Complex[1] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatImag());

      // FIXME: the target may want to specify that this is packed.
      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
    }
    case APValue::Vector: {
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
      }
      return llvm::ConstantVector::get(&Inits[0], Inits.size());
    }
    }
  }

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

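// containsPointerToDataMember - Return true if T is, or (via array elements
// and record members) contains, a pointer to data member. Such types cannot
// be zero-initialized with a plain null constant because the Itanium ABI
// uses -1 as the null member pointer value.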
static bool containsPointerToDataMember(CodeGenTypes &Types, QualType T) {
  // No need to check for member pointers when not compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return false;

  T = Types.getContext().getBaseElementType(T);

  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    // FIXME: It would be better if there was a way to explicitly compute the
    // record layout instead of converting to a type.
    Types.ConvertTagDeclType(RD);

    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    return Layout.containsPointerToDataMember();
  }

  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return !MPT->getPointeeType()->isFunctionType();

  return false;
}

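// EmitNullConstant - Emit the null value for T. A plain zeroinitializer
// suffices unless T contains pointers to data members, in which case the
// constant is built recursively so those members get the ABI null value of
// -1. Illustrative sketch (hypothetical type): "struct S { int C::*p; };"
// would be emitted as { ptrdiff_t -1 } rather than zeroinitializer.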
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  if (!containsPointerToDataMember(getTypes(), T))
    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));

  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {

    QualType ElementTy = CAT->getElementType();

    llvm::Constant *Element = EmitNullConstant(ElementTy);
    unsigned NumElements = CAT->getSize().getZExtValue();
    std::vector<llvm::Constant *> Array(NumElements);
    for (unsigned i = 0; i != NumElements; ++i)
      Array[i] = Element;

    const llvm::ArrayType *ATy =
      cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
    return llvm::ConstantArray::get(ATy, Array);
  }

  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    assert(!RD->getNumBases() &&
           "FIXME: Handle zero-initializing structs with bases and "
           "pointers to data members.");
    const llvm::StructType *STy =
      cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
    unsigned NumElements = STy->getNumElements();
    std::vector<llvm::Constant *> Elements(NumElements);

    for (RecordDecl::field_iterator I = RD->field_begin(),
         E = RD->field_end(); I != E; ++I) {
      const FieldDecl *FD = *I;

      unsigned FieldNo = getTypes().getLLVMFieldNo(FD);
      Elements[FieldNo] = EmitNullConstant(FD->getType());
    }

    // Now go through all other fields and zero them out.
    for (unsigned i = 0; i != NumElements; ++i) {
      if (!Elements[i])
        Elements[i] = llvm::Constant::getNullValue(STy->getElementType(i));
    }

    return llvm::ConstantStruct::get(STy, Elements);
  }

  assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
         "Should only see pointers to data members here!");

  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1,
                                /*isSigned=*/true);
}

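// Illustrative sketch for the function below (hypothetical type, assuming a
// 4-byte int and no padding): for "struct S { int a; int b; };", &S::b is
// emitted as the ptrdiff_t constant 4, i.e. the byte offset of 'b' within S.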
llvm::Constant *
CodeGenModule::EmitPointerToDataMember(const FieldDecl *FD) {

  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of the class
  //   object containing it, represented as a ptrdiff_t

  const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(FD->getParent());
  QualType ClassType =
    getContext().getTypeDeclType(const_cast<CXXRecordDecl *>(ClassDecl));

  const llvm::StructType *ClassLTy =
    cast<llvm::StructType>(getTypes().ConvertType(ClassType));

  unsigned FieldNo = getTypes().getLLVMFieldNo(FD);
  uint64_t Offset =
    getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);

  const llvm::Type *PtrDiffTy =
    getTypes().ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset);
}
