//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

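// ConstStructBuilder lays out a constant struct initializer one element at a
// time, tracking the next free byte offset and inserting explicit i8 /
// [N x i8] padding so that every field lands at the offset the AST record
// layout requires. If a field cannot be placed at its required offset with
// natural alignment, the whole result is converted to a packed LLVM struct.
//
// Illustrative sketch (not taken from a real compilation): for
//   struct { char c; int i; } x = { 'a', 1 };
// on a typical 32-bit target the builder would emit roughly
//   { i8 97, [3 x i8] zeroinitializer, i32 1 }
// where the [3 x i8] element is the padding appended by AppendPadding so that
// the i32 field starts at byte offset 4.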
class VISIBILITY_HIDDEN ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;

  unsigned NextFieldOffsetInBytes;

  std::vector<llvm::Constant *> Elements;

  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0) { }

  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   const Expr *InitExpr) {
    uint64_t FieldOffsetInBytes = FieldOffset / 8;

    assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
           && "Field offset mismatch!");

    // Emit the field.
    llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
    if (!C)
      return false;

    unsigned FieldAlignment = getAlignment(C);

    // Round up the field offset to the alignment of the field type.
    uint64_t AlignedNextFieldOffsetInBytes =
      llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

    if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
      std::vector<llvm::Constant *> PackedElements;

      assert(!Packed && "Alignment is wrong even with a packed struct!");

      // Convert the struct to a packed struct.
      uint64_t ElementOffsetInBytes = 0;

      for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
        llvm::Constant *C = Elements[i];

        unsigned ElementAlign =
          CGM.getTargetData().getABITypeAlignment(C->getType());
        uint64_t AlignedElementOffsetInBytes =
          llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);

        if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
          // We need some padding.
          uint64_t NumBytes =
            AlignedElementOffsetInBytes - ElementOffsetInBytes;

          const llvm::Type *Ty = llvm::Type::Int8Ty;
          if (NumBytes > 1)
            Ty = llvm::ArrayType::get(Ty, NumBytes);

          llvm::Constant *Padding = llvm::Constant::getNullValue(Ty);
          PackedElements.push_back(Padding);
          ElementOffsetInBytes += getSizeInBytes(Padding);
        }

        PackedElements.push_back(C);
        ElementOffsetInBytes += getSizeInBytes(C);
      }

      assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
             "Packing the struct changed its size!");

      Elements = PackedElements;
      Packed = true;
      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
      // We need to append padding.
      AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);

      assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
             "Did not add enough padding!");

      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    // Add the field.
    Elements.push_back(C);
    NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);

    return true;
  }

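  // AppendBitField packs a constant bit-field value into the byte stream one
  // i8 at a time: any bits that share a byte with the previous element are
  // OR'ed into that byte, whole bytes are appended directly, and a final
  // partial byte is zero-extended (and, on big-endian targets, shifted up to
  // the high end of the byte).
  //
  // Illustrative sketch (not from a real compilation): for
  //   struct { unsigned a : 3; unsigned b : 7; } x = { 5, 33 };
  // on a little-endian target the two fields straddle a byte boundary, so the
  // builder would produce the bytes i8 13 and i8 1 for the two fields -- the
  // low 5 bits of 'b' are OR'ed into the byte holding 'a', the remaining 2
  // bits land in the next byte, and tail padding later rounds the result up
  // to the record size.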
  bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      const Expr *InitExpr) {
    llvm::ConstantInt *CI =
      cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
                                                           Field->getType(),
                                                           CGF));
    // FIXME: Can this ever happen?
    if (!CI)
      return false;

    if (FieldOffset > NextFieldOffsetInBytes * 8) {
      // We need to add padding.
      uint64_t NumBytes =
        llvm::RoundUpToAlignment(FieldOffset -
                                 NextFieldOffsetInBytes * 8, 8) / 8;

      AppendPadding(NumBytes);
    }

    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();

    llvm::APInt FieldValue = CI->getValue();

    // Promote the size of FieldValue if necessary
    // FIXME: This should never occur, but currently it can because initializer
    // constants are cast to bool, and because clang is not enforcing bitfield
    // width limits.
    if (FieldSize > FieldValue.getBitWidth())
      FieldValue.zext(FieldSize);

    // Truncate the size of FieldValue to the bit field size.
    if (FieldSize < FieldValue.getBitWidth())
      FieldValue.trunc(FieldSize);

    if (FieldOffset < NextFieldOffsetInBytes * 8) {
      // Either part of the field or the entire field can go into the previous
      // byte.
      assert(!Elements.empty() && "Elements can't be empty!");

      unsigned BitsInPreviousByte =
        NextFieldOffsetInBytes * 8 - FieldOffset;

      bool FitsCompletelyInPreviousByte =
        BitsInPreviousByte >= FieldValue.getBitWidth();

      llvm::APInt Tmp = FieldValue;

      if (!FitsCompletelyInPreviousByte) {
        unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

        if (CGM.getTargetData().isBigEndian()) {
          Tmp = Tmp.lshr(NewFieldWidth);
          Tmp.trunc(BitsInPreviousByte);

          // We want the remaining high bits.
          FieldValue.trunc(NewFieldWidth);
        } else {
          Tmp.trunc(BitsInPreviousByte);

          // We want the remaining low bits.
          FieldValue = FieldValue.lshr(BitsInPreviousByte);
          FieldValue.trunc(NewFieldWidth);
        }
      }

      Tmp.zext(8);
      if (CGM.getTargetData().isBigEndian()) {
        if (FitsCompletelyInPreviousByte)
          Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
      } else {
        Tmp = Tmp.shl(8 - BitsInPreviousByte);
      }

      // Or in the bits that go into the previous byte.
      Tmp |= cast<llvm::ConstantInt>(Elements.back())->getValue();
      Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

      if (FitsCompletelyInPreviousByte)
        return true;
    }

    while (FieldValue.getBitWidth() > 8) {
      llvm::APInt Tmp;

      if (CGM.getTargetData().isBigEndian()) {
        // We want the high bits.
        Tmp = FieldValue;
        Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
        Tmp.trunc(8);
      } else {
        // We want the low bits.
        Tmp = FieldValue;
        Tmp.trunc(8);

        FieldValue = FieldValue.lshr(8);
      }

      Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
      NextFieldOffsetInBytes++;

      FieldValue.trunc(FieldValue.getBitWidth() - 8);
    }

    assert(FieldValue.getBitWidth() > 0 &&
           "Should have at least one bit left!");
    assert(FieldValue.getBitWidth() <= 8 &&
           "Should not have more than a byte left!");

    if (FieldValue.getBitWidth() < 8) {
      if (CGM.getTargetData().isBigEndian()) {
        unsigned BitWidth = FieldValue.getBitWidth();

        FieldValue.zext(8);
        FieldValue = FieldValue << (8 - BitWidth);
      } else
        FieldValue.zext(8);
    }

    // Append the last element.
    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                              FieldValue));
    NextFieldOffsetInBytes++;
    return true;
  }

  void AppendPadding(uint64_t NumBytes) {
    if (!NumBytes)
      return;

    const llvm::Type *Ty = llvm::Type::Int8Ty;
    if (NumBytes > 1)
      Ty = llvm::ArrayType::get(Ty, NumBytes);

    llvm::Constant *C = llvm::Constant::getNullValue(Ty);
    Elements.push_back(C);
    assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");

    NextFieldOffsetInBytes += getSizeInBytes(C);
  }

  void AppendTailPadding(uint64_t RecordSize) {
    assert(RecordSize % 8 == 0 && "Invalid record size!");

    uint64_t RecordSizeInBytes = RecordSize / 8;
    assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

    unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
    AppendPadding(NumPadBytes);
  }

  bool Build(const InitListExpr *ILE) {
    RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    unsigned FieldNo = 0;
    unsigned ElementNo = 0;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
         FieldEnd = RD->field_end();
         ElementNo < ILE->getNumInits() && Field != FieldEnd;
         ++Field, ++FieldNo) {
      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;

        if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
                            ILE->getInit(ElementNo)))
          return false;
      } else {
        if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
                         ILE->getInit(ElementNo)))
          return false;
      }

      ElementNo++;
    }

    uint64_t LayoutSizeInBytes = Layout.getSize() / 8;

    if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
      // If the struct is bigger than the size of the record type,
      // we must have a flexible array member at the end.
      assert(RD->hasFlexibleArrayMember() &&
             "Must have flexible array member if struct is bigger than type!");

      // No tail padding is necessary.
      return true;
    }

    // Append tail padding if necessary.
    AppendTailPadding(Layout.getSize());

    assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
           "Tail padding mismatch!");

    return true;
  }

  unsigned getAlignment(const llvm::Constant *C) const {
    if (Packed)
      return 1;

    return CGM.getTargetData().getABITypeAlignment(C->getType());
  }

  uint64_t getSizeInBytes(const llvm::Constant *C) const {
    return CGM.getTargetData().getTypeAllocSize(C->getType());
  }

public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     const InitListExpr *ILE) {
    ConstStructBuilder Builder(CGM, CGF);

    if (!Builder.Build(ILE))
      return 0;

    llvm::Constant *Result =
      llvm::ConstantStruct::get(Builder.Elements, Builder.Packed);

    assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
                                    Builder.getAlignment(Result)) ==
           Builder.getSizeInBytes(Result) && "Size mismatch!");

    return Result;
  }
};

class VISIBILITY_HIDDEN ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *VisitCastExpr(CastExpr* E) {
    // GCC cast to union extension
    if (E->getType()->isUnionType()) {
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();
      return EmitUnion(CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF),
                       Ty);
    }
    // Explicit and implicit no-op casts
    QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
    if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy)) {
      return Visit(E->getSubExpr());
    }
    return 0;
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

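  // EmitArrayInitialization handles three cases: a char array initialized
  // from a string literal or @encode (forwarded to the corresponding
  // visitor), element-by-element emission with null-fill for any trailing
  // elements the initializer list omits, and a fallback to a packed anonymous
  // struct when some emitted element has a different LLVM type than the array
  // element type.
  //
  // Illustrative sketch (not from a real compilation): for
  //   int a[4] = { 1, 2 };
  // the result would be roughly [4 x i32] [i32 1, i32 2, i32 0, i32 0], with
  // the last two elements coming from the null-fill loop below.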
  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    unsigned NumInitElements = ILE->getNumInits();
    // FIXME: Check for wide strings
    // FIXME: Check for NumInitElements exactly equal to 1??
    if (NumInitElements > 0 &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
        ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
      return Visit(ILE->getInit(0));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = llvm::StructType::get(Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

  void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts,
                                FieldDecl* Field, Expr* E) {
    // Calculate the value to insert
    llvm::Constant *C = CGM.EmitConstantExpr(E, Field->getType(), CGF);
    if (!C)
      return;

    llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
    if (!CI) {
      CGM.ErrorUnsupported(E, "bitfield initialization");
      return;
    }
    llvm::APInt V = CI->getValue();

    // Calculate information about the relevant field
    const llvm::Type* Ty = CI->getType();
    const llvm::TargetData &TD = CGM.getTypes().getTargetData();
    unsigned size = TD.getTypeAllocSizeInBits(Ty);
    CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
    unsigned FieldOffset = Info.FieldNo * size;

    FieldOffset += Info.Start;

    // Find where to start the insertion
    // FIXME: This is O(n^2) in the number of bit-fields!
    // FIXME: This won't work if the struct isn't completely packed!
    unsigned offset = 0, i = 0;
    while (offset < (FieldOffset & -8))
      offset += TD.getTypeAllocSizeInBits(Elts[i++]->getType());

    // Advance over 0 sized elements (must terminate in bounds since
    // the bitfield must have a size).
    while (TD.getTypeAllocSizeInBits(Elts[i]->getType()) == 0)
      ++i;

    // Promote the size of V if necessary
    // FIXME: This should never occur, but currently it can because initializer
    // constants are cast to bool, and because clang is not enforcing bitfield
    // width limits.
    if (Info.Size > V.getBitWidth())
      V.zext(Info.Size);

    // Insert the bits into the struct
    // FIXME: This algorithm is only correct on X86!
    // FIXME: This algorithm assumes bit-fields only have byte-size elements!
    unsigned bitsToInsert = Info.Size;
    unsigned curBits = std::min(8 - (FieldOffset & 7), bitsToInsert);
    unsigned byte = V.getLoBits(curBits).getZExtValue() << (FieldOffset & 7);
    do {
      llvm::Constant* byteC =
        llvm::ConstantInt::get(llvm::Type::Int8Ty, byte);
      Elts[i] = llvm::ConstantExpr::getOr(Elts[i], byteC);
      ++i;
      V = V.lshr(curBits);
      bitsToInsert -= curBits;

      if (!bitsToInsert)
        break;

      curBits = bitsToInsert > 8 ? 8 : bitsToInsert;
      byte = V.getLoBits(curBits).getZExtValue();
    } while (true);
  }

  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);

    // FIXME: Remove the old struct builder once we're sure that the new one
    // works well enough!
    const llvm::StructType *SType =
        cast<llvm::StructType>(ConvertType(ILE->getType()));
    RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
    std::vector<llvm::Constant*> Elts;

    // Initialize the whole structure to zero.
    // FIXME: This doesn't handle member pointers correctly!
    for (unsigned i = 0; i < SType->getNumElements(); ++i) {
      const llvm::Type *FieldTy = SType->getElementType(i);
      Elts.push_back(llvm::Constant::getNullValue(FieldTy));
    }

    // Copy initializer elements. Skip padding fields.
    unsigned EltNo = 0;  // Element no in ILE
    bool RewriteType = false;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
                                 FieldEnd = RD->field_end();
         EltNo < ILE->getNumInits() && Field != FieldEnd; ++Field) {
      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;
        InsertBitfieldIntoStruct(Elts, *Field, ILE->getInit(EltNo));
      } else {
        unsigned FieldNo = CGM.getTypes().getLLVMFieldNo(*Field);
        llvm::Constant *C = CGM.EmitConstantExpr(ILE->getInit(EltNo),
                                                 Field->getType(), CGF);
        if (!C) return 0;
        RewriteType |= (C->getType() != Elts[FieldNo]->getType());
        Elts[FieldNo] = C;
      }
      EltNo++;
    }

    if (RewriteType) {
      // FIXME: Make this work for non-packed structs
      assert(SType->isPacked() && "Cannot recreate unpacked structs");
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      SType = llvm::StructType::get(Types, true);
    }

    return llvm::ConstantStruct::get(SType, Elts);
  }

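  // EmitUnion wraps the initializer for the active union member in an
  // anonymous struct of the form { member, padding } so that the result has
  // the same allocation size as the union type itself.
  //
  // Illustrative sketch (not from a real compilation): for
  //   union { char c; double d; } u = { 'x' };
  // initializing through 'c' would yield roughly { i8 120, [7 x i8] }, where
  // the [7 x i8] element pads the constant out to the 8 bytes the union needs
  // for 'd'.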
  llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) {
    if (!C)
      return 0;

    // Build a struct with the union sub-element as the first member,
    // and padded to the appropriate size
    std::vector<llvm::Constant*> Elts;
    std::vector<const llvm::Type*> Types;
    Elts.push_back(C);
    Types.push_back(C->getType());
    unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
    unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

    assert(CurSize <= TotalSize && "Union size mismatch!");
    if (unsigned NumPadBytes = TotalSize - CurSize) {
      const llvm::Type *Ty = llvm::Type::Int8Ty;
      if (NumPadBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumPadBytes);

      Elts.push_back(llvm::Constant::getNullValue(Ty));
      Types.push_back(Ty);
    }

    llvm::StructType* STy = llvm::StructType::get(Types, false);
    return llvm::ConstantStruct::get(STy, Elts);
  }

  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);

    const llvm::Type *Ty = ConvertType(ILE->getType());

    FieldDecl* curField = ILE->getInitializedFieldInUnion();
    if (!curField) {
      // There's no field to initialize, so value-initialize the union.
#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                   FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return llvm::Constant::getNullValue(Ty);
    }

    if (curField->isBitField()) {
      // Create a dummy struct for bit-field insertion
      unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty);
      llvm::Constant* NV =
        llvm::Constant::getNullValue(llvm::Type::Int8Ty);
      std::vector<llvm::Constant*> Elts(NumElts, NV);

      InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
      const llvm::ArrayType *RetTy =
          llvm::ArrayType::get(NV->getType(), NumElts);
      return llvm::ConstantArray::get(RetTy, Elts);
    }

    llvm::Constant *InitElem;
    if (ILE->getNumInits() > 0) {
      Expr *Init = ILE->getInit(0);
      InitElem = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
    } else {
      InitElem = CGM.EmitNullConstant(curField->getType());
    }
    return EmitUnion(InitElem, Ty);
  }

  llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
    const llvm::VectorType *VType =
        cast<llvm::VectorType>(ConvertType(ILE->getType()));
    const llvm::Type *ElemTy = VType->getElementType();
    std::vector<llvm::Constant*> Elts;
    unsigned NumElements = VType->getNumElements();
    unsigned NumInitElements = ILE->getNumInits();

    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      Elts.push_back(C);
    }

    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    return llvm::ConstantVector::get(VType, Elts);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isStructureType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    if (ILE->getType()->isVectorType())
      return EmitVectorInitialization(ILE);

    assert(0 && "Unable to handle InitListExpr");
    // Get rid of control reaches end of void function warning.
    // Not reached.
    return 0;
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return llvm::ConstantArray::get(CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantArray::get(Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstQualified(),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral");
      return C;
    }
    case Expr::DeclRefExprClass:
    case Expr::QualifiedDeclRefExprClass: {
      NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(GlobalDecl(FD));
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isBlockVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      // __func__/__FUNCTION__ -> "".  __PRETTY_FUNCTION__ -> "top level".
      std::string Str;
      if (cast<PredefinedExpr>(E)->getIdentType() ==
          PredefinedExpr::PrettyFunction)
        Str = "top level";

      return CGM.GetAddrOfConstantCString(Str, ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id);
      return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      if (CE->isBuiltinCall(CGM.getContext()) !=
            Builtin::BI__builtin___CFStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};

}  // end anonymous namespace.

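// EmitConstantExpr first tries the AST-level constant evaluator
// (Expr::Evaluate, or Expr::EvaluateAsLValue for reference types) and converts
// the resulting APValue directly into an llvm::Constant; only if that fails
// does it fall back to walking the expression tree with ConstExprEmitter.
// Bool results are stored as memory-width integers, so any i1 constant is
// zero-extended to the memory type before being returned.
//
// Illustrative sketch (not from a real compilation): for
//   static const int x = 2 + 3 * 4;
// Evaluate would succeed with an APValue::Int of 14, and this function would
// return the llvm::ConstantInt i32 14 without ever visiting the expression
// tree.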
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success) {
    assert(!Result.HasSideEffects &&
           "Constant expr should not have any side effects!");
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
    case APValue::LValue: {
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::Int64Ty,
                               Result.Val.getLValueOffset());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          const llvm::Type *Type =
            llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
          llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
          Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
          C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getBitCast(C, DestTy);

        return llvm::ConstantExpr::getPtrToInt(C, DestTy);
      } else {
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return llvm::ConstantExpr::getTrunc(C, DestTy);

        return C;
      }
    }
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      if (C->getType() == llvm::Type::Int1Ty) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = llvm::ConstantExpr::getZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      return llvm::ConstantStruct::get(Complex, 2);
    }
    case APValue::Float:
      return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatReal());
      Complex[1] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatImag());

      return llvm::ConstantStruct::get(Complex, 2);
    }
    case APValue::Vector: {
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
      }
      return llvm::ConstantVector::get(&Inits[0], Inits.size());
    }
    }
  }

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType() == llvm::Type::Int1Ty) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  // Always return an LLVM null constant for now; this will change when we
  // get support for IRGen of member pointers.
  return llvm::Constant::getNullValue(getTypes().ConvertType(T));
}