CGExprConstant.cpp revision 4a28d5deeba33722aa009eab488591fb9055cc7e
//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

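// ConstStructBuilder - incrementally builds an LLVM constant for a struct
// initializer list, appending each field's constant at its record-layout
// offset and inserting explicit i8 padding where needed.  Note that
// BuildStruct() currently always returns 0 (see the FIXME there), so the
// builder's result is not yet used.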
class VISIBILITY_HIDDEN ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;
  unsigned NextFieldOffsetInBytes;

  std::vector<llvm::Constant *> Elements;

  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0) { }

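  // AppendField - emit the field's constant and place it at FieldOffset.
  // If rounding NextFieldOffsetInBytes up to the constant's ABI alignment
  // already lands on the field's byte offset, the natural (unpacked) struct
  // layout supplies the padding; if it falls short, explicit i8 padding is
  // appended first.  For example, for
  //   struct { char a; char b __attribute__((aligned(4))); }
  // 'b' lives at byte offset 4, so a [3 x i8] padding element is appended
  // after 'a' before 'b' is added.  Overshooting the field offset would
  // require converting to a packed struct, which is not handled yet.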
  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   const Expr *InitExpr) {
    uint64_t FieldOffsetInBytes = FieldOffset / 8;

    assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
           && "Field offset mismatch!");

    // Emit the field.
    llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
    if (!C)
      return false;

    unsigned FieldAlignment = getAlignment(C);

    // Round up the field offset to the alignment of the field type.
    uint64_t AlignedNextFieldOffsetInBytes =
      llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

    if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
      // FIXME: Must convert the struct to a packed struct.
      return false;
    }

    if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
      // We need to append padding.
      AppendPadding(FieldOffsetInBytes - AlignedNextFieldOffsetInBytes);

      assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
             "Did not add enough padding!");

      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    // Add the field.
    Elements.push_back(C);
    NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);

    return true;
  }

  void AppendPadding(uint64_t NumBytes) {
    if (!NumBytes)
      return;

    const llvm::Type *Ty = llvm::Type::Int8Ty;
    if (NumBytes > 1)
      Ty = CGM.getLLVMContext().getArrayType(Ty, NumBytes);

    llvm::Constant *C = CGM.getLLVMContext().getNullValue(Ty);
    Elements.push_back(C);
    assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");

    NextFieldOffsetInBytes += getSizeInBytes(C);
  }

  void AppendTailPadding(uint64_t RecordSize) {
    assert(RecordSize % 8 == 0 && "Invalid record size!");

    uint64_t RecordSizeInBytes = RecordSize / 8;
    assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

    unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
    AppendPadding(NumPadBytes);
  }

  bool Build(const InitListExpr *ILE) {
    RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    unsigned FieldNo = 0;
    unsigned ElementNo = 0;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
         FieldEnd = RD->field_end();
         ElementNo < ILE->getNumInits() && Field != FieldEnd;
         ++Field, ++FieldNo) {
      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;

        // FIXME: Bitfield support.
        return false;
      } else {
        if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
                         ILE->getInit(ElementNo)))
          return false;
      }

      ElementNo++;
    }

    // Append tail padding if necessary.
    AppendTailPadding(Layout.getSize());

    assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
           "Tail padding mismatch!");

    return true;
  }

  unsigned getAlignment(const llvm::Constant *C) const {
    if (Packed)
      return 1;

    return CGM.getTargetData().getABITypeAlignment(C->getType());
  }

  uint64_t getSizeInBytes(const llvm::Constant *C) const {
    return CGM.getTargetData().getTypeAllocSize(C->getType());
  }

public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     const InitListExpr *ILE) {
    ConstStructBuilder Builder(CGM, CGF);

    // FIXME: Use this when it works well enough.
    return 0;

    if (!Builder.Build(ILE))
      return 0;

    llvm::Constant *Result =
      CGM.getLLVMContext().getConstantStruct(Builder.Elements, Builder.Packed);

    assert(Builder.NextFieldOffsetInBytes == Builder.getSizeInBytes(Result));

    return Result;
  }
};

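// ConstExprEmitter - a StmtVisitor that emits llvm::Constants for expressions
// the generic Expr::Evaluate path cannot fold: aggregate initializer lists,
// string literals and @encode expressions used as array initializers, casts
// to union type, and (via EmitLValue) the addresses of globals, functions,
// string literals, compound literals, labels, and blocks.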
class VISIBILITY_HIDDEN ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *VisitCastExpr(CastExpr* E) {
    // GCC cast to union extension
    if (E->getType()->isUnionType()) {
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();
      return EmitUnion(CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF),
                       Ty);
    }
    // Explicit and implicit no-op casts
    QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
    if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy)) {
      return Visit(E->getSubExpr());
    }
    return 0;
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

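  // EmitArrayInitialization - emit a constant for an array initializer list.
  // A leading string literal or @encode initializing a char array is emitted
  // directly; otherwise each initializer is emitted and trailing elements are
  // zero-filled.  If any element constant's LLVM type differs from the
  // declared element type, the whole initializer is emitted as a packed
  // struct of the element types instead of a constant array.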
  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    unsigned NumInitElements = ILE->getNumInits();
    // FIXME: Check for wide strings
    // FIXME: Check for NumInitElements exactly equal to 1??
    if (NumInitElements > 0 &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
        ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
      return Visit(ILE->getInit(0));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(VMContext.getNullValue(ElemTy));

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = VMContext.getStructType(Types, true);
      return VMContext.getConstantStruct(SType, Elts);
    }

    return VMContext.getConstantArray(AType, Elts);
  }

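  // InsertBitfieldIntoStruct - OR a bit-field's constant value into the
  // byte-sized elements of a (packed) constant struct, one byte at a time.
  // For example, a 6-bit field at bit offset 11 (byte 1, bit 3) contributes
  // its low 5 bits, shifted left by 3, to byte 1 and its remaining high bit
  // to the low bit of byte 2.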
  void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts,
                                FieldDecl* Field, Expr* E) {
    // Calculate the value to insert
    llvm::Constant *C = CGM.EmitConstantExpr(E, Field->getType(), CGF);
    if (!C)
      return;

    llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
    if (!CI) {
      CGM.ErrorUnsupported(E, "bitfield initialization");
      return;
    }
    llvm::APInt V = CI->getValue();

    // Calculate information about the relevant field
    const llvm::Type* Ty = CI->getType();
    const llvm::TargetData &TD = CGM.getTypes().getTargetData();
    unsigned size = TD.getTypeAllocSizeInBits(Ty);
    CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
    unsigned FieldOffset = Info.FieldNo * size;

    FieldOffset += Info.Start;

    // Find where to start the insertion
    // FIXME: This is O(n^2) in the number of bit-fields!
    // FIXME: This won't work if the struct isn't completely packed!
    unsigned offset = 0, i = 0;
    while (offset < (FieldOffset & -8))
      offset += TD.getTypeAllocSizeInBits(Elts[i++]->getType());

    // Advance over 0 sized elements (must terminate in bounds since
    // the bitfield must have a size).
    while (TD.getTypeAllocSizeInBits(Elts[i]->getType()) == 0)
      ++i;

    // Promote the size of V if necessary
    // FIXME: This should never occur, but currently it can because initializer
    // constants are cast to bool, and because clang is not enforcing bitfield
    // width limits.
    if (Info.Size > V.getBitWidth())
      V.zext(Info.Size);

    // Insert the bits into the struct
    // FIXME: This algorithm is only correct on X86!
    // FIXME: This algorithm assumes bit-fields only have byte-size elements!
    unsigned bitsToInsert = Info.Size;
    unsigned curBits = std::min(8 - (FieldOffset & 7), bitsToInsert);
    unsigned byte = V.getLoBits(curBits).getZExtValue() << (FieldOffset & 7);
    do {
      llvm::Constant* byteC =
        llvm::ConstantInt::get(llvm::Type::Int8Ty, byte);
      Elts[i] = VMContext.getConstantExprOr(Elts[i], byteC);
      ++i;
      V = V.lshr(curBits);
      bitsToInsert -= curBits;

      if (!bitsToInsert)
        break;

      curBits = bitsToInsert > 8 ? 8 : bitsToInsert;
      byte = V.getLoBits(curBits).getZExtValue();
    } while (true);
  }

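  // EmitStructInitialization - emit a constant for a struct initializer list.
  // Start from an all-zero element list matching the converted LLVM struct
  // type, overwrite the fields that have initializers (OR-ing bit-fields into
  // the underlying bytes), and recreate the struct type as a packed struct if
  // any element's type changed.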
  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    ConstStructBuilder::BuildStruct(CGM, CGF, ILE);

    const llvm::StructType *SType =
        cast<llvm::StructType>(ConvertType(ILE->getType()));
    RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
    std::vector<llvm::Constant*> Elts;

    // Initialize the whole structure to zero.
    // FIXME: This doesn't handle member pointers correctly!
    for (unsigned i = 0; i < SType->getNumElements(); ++i) {
      const llvm::Type *FieldTy = SType->getElementType(i);
      Elts.push_back(VMContext.getNullValue(FieldTy));
    }

    // Copy initializer elements. Skip padding fields.
    unsigned EltNo = 0;  // Element no in ILE
    bool RewriteType = false;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
                                 FieldEnd = RD->field_end();
         EltNo < ILE->getNumInits() && Field != FieldEnd; ++Field) {
      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;
        InsertBitfieldIntoStruct(Elts, *Field, ILE->getInit(EltNo));
      } else {
        unsigned FieldNo = CGM.getTypes().getLLVMFieldNo(*Field);
        llvm::Constant *C = CGM.EmitConstantExpr(ILE->getInit(EltNo),
                                                 Field->getType(), CGF);
        if (!C) return 0;
        RewriteType |= (C->getType() != Elts[FieldNo]->getType());
        Elts[FieldNo] = C;
      }
      EltNo++;
    }

    if (RewriteType) {
      // FIXME: Make this work for non-packed structs
      assert(SType->isPacked() && "Cannot recreate unpacked structs");
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      SType = VMContext.getStructType(Types, true);
    }

    return VMContext.getConstantStruct(SType, Elts);
  }

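  // EmitUnion - wrap a union member's constant in an anonymous struct whose
  // first element is the member and whose second element, if needed, is i8
  // (or [N x i8]) zero padding out to the union's size.  For a 4-byte union
  // initialized through a char member this yields
  // { i8 <value>, [3 x i8] zeroinitializer }.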
  llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) {
    if (!C)
      return 0;

    // Build a struct with the union sub-element as the first member,
    // and padded to the appropriate size
    std::vector<llvm::Constant*> Elts;
    std::vector<const llvm::Type*> Types;
    Elts.push_back(C);
    Types.push_back(C->getType());
    unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
    unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

    assert(CurSize <= TotalSize && "Union size mismatch!");
    if (unsigned NumPadBytes = TotalSize - CurSize) {
      const llvm::Type *Ty = llvm::Type::Int8Ty;
      if (NumPadBytes > 1)
        Ty = VMContext.getArrayType(Ty, NumPadBytes);

      Elts.push_back(VMContext.getNullValue(Ty));
      Types.push_back(Ty);
    }

    llvm::StructType* STy = VMContext.getStructType(Types, false);
    return VMContext.getConstantStruct(STy, Elts);
  }

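  // EmitUnionInitialization - emit a constant for a union initializer list.
  // A union with no initialized field (only unnamed bit-fields) is
  // value-initialized to null; a bit-field member is OR'd into a dummy byte
  // array of the union's size; otherwise the member's constant (or a null
  // constant for an empty list) is wrapped via EmitUnion.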
  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    const llvm::Type *Ty = ConvertType(ILE->getType());

    FieldDecl* curField = ILE->getInitializedFieldInUnion();
    if (!curField) {
      // There's no field to initialize, so value-initialize the union.
#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                   FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return VMContext.getNullValue(Ty);
    }

    if (curField->isBitField()) {
      // Create a dummy struct for bit-field insertion
      unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty);
      llvm::Constant* NV =
        VMContext.getNullValue(llvm::Type::Int8Ty);
      std::vector<llvm::Constant*> Elts(NumElts, NV);

      InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
      const llvm::ArrayType *RetTy =
          VMContext.getArrayType(NV->getType(), NumElts);
      return VMContext.getConstantArray(RetTy, Elts);
    }

    llvm::Constant *InitElem;
    if (ILE->getNumInits() > 0) {
      Expr *Init = ILE->getInit(0);
      InitElem = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
    } else {
      InitElem = CGM.EmitNullConstant(curField->getType());
    }
    return EmitUnion(InitElem, Ty);
  }

  llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
    const llvm::VectorType *VType =
        cast<llvm::VectorType>(ConvertType(ILE->getType()));
    const llvm::Type *ElemTy = VType->getElementType();
    std::vector<llvm::Constant*> Elts;
    unsigned NumElements = VType->getNumElements();
    unsigned NumInitElements = ILE->getNumInits();

    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      Elts.push_back(C);
    }

    for (; i < NumElements; ++i)
      Elts.push_back(VMContext.getNullValue(ElemTy));

    return VMContext.getConstantVector(VType, Elts);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isStructureType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    if (ILE->getType()->isVectorType())
      return EmitVectorInitialization(ILE);

    assert(0 && "Unable to handle InitListExpr");
    // Get rid of control reaches end of void function warning.
    // Not reached.
    return 0;
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return VMContext.getConstantArray(CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return VMContext.getConstantArray(Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
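  // EmitLValue - emit the address of an lvalue expression as a constant:
  // global and static local variables, functions, string literals, @encode
  // and ObjC string literal data, compound literals (materialized here as
  // internal globals), __func__-style predefined expressions, label
  // addresses, __builtin___CFStringMakeConstantString calls, and blocks.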
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstQualified(),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral");
      return C;
    }
    case Expr::DeclRefExprClass:
    case Expr::QualifiedDeclRefExprClass: {
      NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(GlobalDecl(FD));
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isBlockVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
      return VMContext.getConstantExprBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      // __func__/__FUNCTION__ -> "".  __PRETTY_FUNCTION__ -> "top level".
      std::string Str;
      if (cast<PredefinedExpr>(E)->getIdentType() ==
          PredefinedExpr::PrettyFunction)
        Str = "top level";

      return CGM.GetAddrOfConstantCString(Str, ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id);
      return VMContext.getConstantExprIntToPtr(C, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      if (CE->isBuiltinCall(CGM.getContext()) !=
            Builtin::BI__builtin___CFStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};

}  // end anonymous namespace.

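// EmitConstantExpr - try to constant fold the expression with the AST-level
// evaluator first, mapping the resulting APValue (integer, float, complex,
// vector, or lvalue base plus byte offset applied through an i8* GEP) to an
// LLVM constant; otherwise fall back to ConstExprEmitter.  i1 results are
// zero-extended to the bool memory type in either path.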
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success) {
    assert(!Result.HasSideEffects &&
           "Constant expr should not have any side effects!");
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
    case APValue::LValue: {
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::Int64Ty,
                               Result.Val.getLValueOffset());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          const llvm::Type *Type =
            VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
          llvm::Constant *Casted = VMContext.getConstantExprBitCast(C, Type);
          Casted = VMContext.getConstantExprGetElementPtr(Casted, &Offset, 1);
          C = VMContext.getConstantExprBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return VMContext.getConstantExprBitCast(C, DestTy);

        return VMContext.getConstantExprPtrToInt(C, DestTy);
      } else {
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return VMContext.getConstantExprIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return VMContext.getConstantExprTrunc(C, DestTy);

        return C;
      }
    }
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      if (C->getType() == llvm::Type::Int1Ty) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = VMContext.getConstantExprZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      return VMContext.getConstantStruct(Complex, 2);
    }
    case APValue::Float:
      return VMContext.getConstantFP(Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = VMContext.getConstantFP(Result.Val.getComplexFloatReal());
      Complex[1] = VMContext.getConstantFP(Result.Val.getComplexFloatImag());

      return VMContext.getConstantStruct(Complex, 2);
    }
    case APValue::Vector: {
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(VMContext.getConstantFP(Elt.getFloat()));
      }
      return VMContext.getConstantVector(&Inits[0], Inits.size());
    }
    }
  }

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType() == llvm::Type::Int1Ty) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = VMContext.getConstantExprZExt(C, BoolTy);
  }
  return C;
}

llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  // Always return an LLVM null constant for now; this will change when we
  // get support for IRGen of member pointers.
  return getLLVMContext().getNullValue(getTypes().ConvertType(T));
}
