CGExprScalar.cpp revision a91d6a6619a91d0ca7102d8ab5678d855f04d850
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CodeGenFunction.h"
15#include "CodeGenModule.h"
16#include "clang/AST/ASTContext.h"
17#include "clang/AST/DeclObjC.h"
18#include "clang/AST/RecordLayout.h"
19#include "clang/AST/StmtVisitor.h"
20#include "clang/Basic/TargetInfo.h"
21#include "llvm/Constants.h"
22#include "llvm/Function.h"
23#include "llvm/GlobalVariable.h"
24#include "llvm/Intrinsics.h"
25#include "llvm/Module.h"
26#include "llvm/Support/Compiler.h"
27#include "llvm/Support/CFG.h"
28#include "llvm/Target/TargetData.h"
29#include <cstdarg>
30
31using namespace clang;
32using namespace CodeGen;
33using llvm::Value;
34
35//===----------------------------------------------------------------------===//
36//                         Scalar Expression Emitter
37//===----------------------------------------------------------------------===//
38
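/// BinOpInfo - Bundles the emitted operands of a binary operator together with
/// the computation type and the originating expression, so that the Emit*
/// helpers below can share a single signature.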
39struct BinOpInfo {
40  Value *LHS;
41  Value *RHS;
42  QualType Ty;  // Computation Type.
43  const BinaryOperator *E;
44};
45
46namespace {
47class VISIBILITY_HIDDEN ScalarExprEmitter
48  : public StmtVisitor<ScalarExprEmitter, Value*> {
49  CodeGenFunction &CGF;
50  CGBuilderTy &Builder;
51  bool IgnoreResultAssign;
52  llvm::LLVMContext &VMContext;
53public:
54
55  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
56    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
57      VMContext(cgf.getLLVMContext()) {
58  }
59
60  //===--------------------------------------------------------------------===//
61  //                               Utilities
62  //===--------------------------------------------------------------------===//
63
64  bool TestAndClearIgnoreResultAssign() {
65    bool I = IgnoreResultAssign;
66    IgnoreResultAssign = false;
67    return I;
68  }
69
70  const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
71  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
72
73  Value *EmitLoadOfLValue(LValue LV, QualType T) {
74    return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
75  }
76
77  /// EmitLoadOfLValue - Given an expression with scalar type that represents
78  /// an l-value, this method emits the address of the l-value, then loads and
79  /// returns the result.
80  Value *EmitLoadOfLValue(const Expr *E) {
81    return EmitLoadOfLValue(EmitLValue(E), E->getType());
82  }
83
84  /// EmitConversionToBool - Convert the specified expression value to a
85  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
86  Value *EmitConversionToBool(Value *Src, QualType DstTy);
87
88  /// EmitScalarConversion - Emit a conversion from the specified type to the
89  /// specified destination type, both of which are LLVM scalar types.
90  Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
91
92  /// EmitComplexToScalarConversion - Emit a conversion from the specified
93  /// complex type to the specified destination type, where the destination
94  /// type is an LLVM scalar type.
95  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
96                                       QualType SrcTy, QualType DstTy);
97
98  //===--------------------------------------------------------------------===//
99  //                            Visitor Methods
100  //===--------------------------------------------------------------------===//
101
102  Value *VisitStmt(Stmt *S) {
103    S->dump(CGF.getContext().getSourceManager());
104    assert(0 && "Stmt can't have scalar result type!");
105    return 0;
106  }
107  Value *VisitExpr(Expr *S);
108  Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
109
110  // Leaves.
111  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
112    return llvm::ConstantInt::get(VMContext, E->getValue());
113  }
114  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
115    return llvm::ConstantFP::get(VMContext, E->getValue());
116  }
117  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
118    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
119  }
120  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
121    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
122  }
123  Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
124    return VMContext.getNullValue(ConvertType(E->getType()));
125  }
126  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
127    return VMContext.getNullValue(ConvertType(E->getType()));
128  }
129  Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
130    return llvm::ConstantInt::get(ConvertType(E->getType()),
131                                  CGF.getContext().typesAreCompatible(
132                                    E->getArgType1(), E->getArgType2()));
133  }
134  Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
135  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
136    llvm::Value *V =
137      llvm::ConstantInt::get(llvm::Type::Int32Ty,
138                             CGF.GetIDForAddrOfLabel(E->getLabel()));
139
140    return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
141  }
142
143  // l-values.
144  Value *VisitDeclRefExpr(DeclRefExpr *E) {
145    if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl()))
146      return llvm::ConstantInt::get(VMContext, EC->getInitVal());
147    return EmitLoadOfLValue(E);
148  }
149  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
150    return CGF.EmitObjCSelectorExpr(E);
151  }
152  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
153    return CGF.EmitObjCProtocolExpr(E);
154  }
155  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
156    return EmitLoadOfLValue(E);
157  }
158  Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
159    return EmitLoadOfLValue(E);
160  }
161  Value *VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
162    return EmitLoadOfLValue(E);
163  }
164  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
165    return CGF.EmitObjCMessageExpr(E).getScalarVal();
166  }
167
168  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
169  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
170  Value *VisitMemberExpr(Expr *E)           { return EmitLoadOfLValue(E); }
171  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
172  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
173    return EmitLoadOfLValue(E);
174  }
175  Value *VisitStringLiteral(Expr *E)  { return EmitLValue(E).getAddress(); }
176  Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
177     return EmitLValue(E).getAddress();
178  }
179
180  Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); }
181
182  Value *VisitInitListExpr(InitListExpr *E) {
183    bool Ignore = TestAndClearIgnoreResultAssign();
184    (void)Ignore;
185    assert(!Ignore && "init list ignored");
186    unsigned NumInitElements = E->getNumInits();
187
188    if (E->hadArrayRangeDesignator()) {
189      CGF.ErrorUnsupported(E, "GNU array range designator extension");
190    }
191
192    const llvm::VectorType *VType =
193      dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
194
195    // We have a scalar in braces. Just use the first element.
196    if (!VType)
197      return Visit(E->getInit(0));
198
199    unsigned NumVectorElements = VType->getNumElements();
200    const llvm::Type *ElementType = VType->getElementType();
201
202    // Emit individual vector element stores.
203    llvm::Value *V = VMContext.getUndef(VType);
204
205    // Emit initializers
206    unsigned i;
207    for (i = 0; i < NumInitElements; ++i) {
208      Value *NewV = Visit(E->getInit(i));
209      Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
210      V = Builder.CreateInsertElement(V, NewV, Idx);
211    }
212
213    // Emit remaining default initializers
214    for (/* Do not initialize i*/; i < NumVectorElements; ++i) {
215      Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
216      llvm::Value *NewV = VMContext.getNullValue(ElementType);
217      V = Builder.CreateInsertElement(V, NewV, Idx);
218    }
219
220    return V;
221  }
222
223  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
224    return VMContext.getNullValue(ConvertType(E->getType()));
225  }
226  Value *VisitImplicitCastExpr(const ImplicitCastExpr *E);
227  Value *VisitCastExpr(const CastExpr *E) {
228    // Make sure to evaluate VLA bounds now so that we have them for later.
229    if (E->getType()->isVariablyModifiedType())
230      CGF.EmitVLASize(E->getType());
231
232    return EmitCastExpr(E->getSubExpr(), E->getType());
233  }
234  Value *EmitCastExpr(const Expr *E, QualType T);
235
236  Value *VisitCallExpr(const CallExpr *E) {
237    if (E->getCallReturnType()->isReferenceType())
238      return EmitLoadOfLValue(E);
239
240    return CGF.EmitCallExpr(E).getScalarVal();
241  }
242
243  Value *VisitStmtExpr(const StmtExpr *E);
244
245  Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
246
247  // Unary Operators.
248  Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
249  Value *VisitUnaryPostDec(const UnaryOperator *E) {
250    return VisitPrePostIncDec(E, false, false);
251  }
252  Value *VisitUnaryPostInc(const UnaryOperator *E) {
253    return VisitPrePostIncDec(E, true, false);
254  }
255  Value *VisitUnaryPreDec(const UnaryOperator *E) {
256    return VisitPrePostIncDec(E, false, true);
257  }
258  Value *VisitUnaryPreInc(const UnaryOperator *E) {
259    return VisitPrePostIncDec(E, true, true);
260  }
261  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
262    return EmitLValue(E->getSubExpr()).getAddress();
263  }
264  Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
265  Value *VisitUnaryPlus(const UnaryOperator *E) {
266    // This differs from gcc, though, most likely due to a bug in gcc.
267    TestAndClearIgnoreResultAssign();
268    return Visit(E->getSubExpr());
269  }
270  Value *VisitUnaryMinus    (const UnaryOperator *E);
271  Value *VisitUnaryNot      (const UnaryOperator *E);
272  Value *VisitUnaryLNot     (const UnaryOperator *E);
273  Value *VisitUnaryReal     (const UnaryOperator *E);
274  Value *VisitUnaryImag     (const UnaryOperator *E);
275  Value *VisitUnaryExtension(const UnaryOperator *E) {
276    return Visit(E->getSubExpr());
277  }
278  Value *VisitUnaryOffsetOf(const UnaryOperator *E);
279
280  // C++
281  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
282    return Visit(DAE->getExpr());
283  }
284  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
285    return CGF.LoadCXXThis();
286  }
287
288  Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
289    return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
290  }
291  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
292    return CGF.EmitCXXNewExpr(E);
293  }
294
295  // Binary Operators.
296  Value *EmitMul(const BinOpInfo &Ops) {
297    if (CGF.getContext().getLangOptions().OverflowChecking
298        && Ops.Ty->isSignedIntegerType())
299      return EmitOverflowCheckedBinOp(Ops);
300    if (Ops.LHS->getType()->isFPOrFPVector())
301      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
302    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
303  }
304  /// Create a binary op that checks for overflow.
305  /// Currently only supports +, - and *.
306  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
307  Value *EmitDiv(const BinOpInfo &Ops);
308  Value *EmitRem(const BinOpInfo &Ops);
309  Value *EmitAdd(const BinOpInfo &Ops);
310  Value *EmitSub(const BinOpInfo &Ops);
311  Value *EmitShl(const BinOpInfo &Ops);
312  Value *EmitShr(const BinOpInfo &Ops);
313  Value *EmitAnd(const BinOpInfo &Ops) {
314    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
315  }
316  Value *EmitXor(const BinOpInfo &Ops) {
317    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
318  }
319  Value *EmitOr (const BinOpInfo &Ops) {
320    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
321  }
322
323  BinOpInfo EmitBinOps(const BinaryOperator *E);
324  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
325                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
326
327  // Binary operators and binary compound assignment operators.
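  // Each HANDLEBINOP(Op) below expands to a VisitBinOp method that emits both
  // operands and forwards to EmitOp, and a VisitBinOpAssign method that routes
  // the compound assignment through EmitCompoundAssign using the same EmitOp.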
328#define HANDLEBINOP(OP) \
329  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
330    return Emit ## OP(EmitBinOps(E));                                      \
331  }                                                                        \
332  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
333    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
334  }
335  HANDLEBINOP(Mul);
336  HANDLEBINOP(Div);
337  HANDLEBINOP(Rem);
338  HANDLEBINOP(Add);
339  HANDLEBINOP(Sub);
340  HANDLEBINOP(Shl);
341  HANDLEBINOP(Shr);
342  HANDLEBINOP(And);
343  HANDLEBINOP(Xor);
344  HANDLEBINOP(Or);
345#undef HANDLEBINOP
346
347  // Comparisons.
348  Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
349                     unsigned SICmpOpc, unsigned FCmpOpc);
350#define VISITCOMP(CODE, UI, SI, FP) \
351    Value *VisitBin##CODE(const BinaryOperator *E) { \
352      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
353                         llvm::FCmpInst::FP); }
354  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT);
355  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT);
356  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE);
357  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE);
358  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ);
359  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE);
360#undef VISITCOMP
361
362  Value *VisitBinAssign     (const BinaryOperator *E);
363
364  Value *VisitBinLAnd       (const BinaryOperator *E);
365  Value *VisitBinLOr        (const BinaryOperator *E);
366  Value *VisitBinComma      (const BinaryOperator *E);
367
368  // Other Operators.
369  Value *VisitBlockExpr(const BlockExpr *BE);
370  Value *VisitConditionalOperator(const ConditionalOperator *CO);
371  Value *VisitChooseExpr(ChooseExpr *CE);
372  Value *VisitVAArgExpr(VAArgExpr *VE);
373  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
374    return CGF.EmitObjCStringLiteral(E);
375  }
376};
377}  // end anonymous namespace.
378
379//===----------------------------------------------------------------------===//
380//                                Utilities
381//===----------------------------------------------------------------------===//
382
383/// EmitConversionToBool - Convert the specified expression value to a
384/// boolean (i1) truth value.  This is equivalent to "Val != 0".
385Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
386  assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs");
387
388  if (SrcType->isRealFloatingType()) {
389    // Compare against 0.0 for fp scalars.
390    llvm::Value *Zero = VMContext.getNullValue(Src->getType());
391    return Builder.CreateFCmpUNE(Src, Zero, "tobool");
392  }
393
394  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
395         "Unknown scalar type to convert");
396
397  // Because of the type rules of C, we often end up computing a logical value,
398  // then zero extending it to int, then wanting it as a logical value again.
399  // Optimize this common case.
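  // For example, in "if (!x)" the logical-not produces an i1 that is zero
  // extended to int (the C type of !x); the branch condition then needs it as
  // an i1 again, so we peel off the zext rather than compare against zero.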
400  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
401    if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) {
402      Value *Result = ZI->getOperand(0);
403      // If there aren't any more uses, zap the instruction to save space.
404      // Note that there can be more uses, for example if this
405      // is the result of an assignment.
406      if (ZI->use_empty())
407        ZI->eraseFromParent();
408      return Result;
409    }
410  }
411
412  // Compare against an integer or pointer null.
413  llvm::Value *Zero = VMContext.getNullValue(Src->getType());
414  return Builder.CreateICmpNE(Src, Zero, "tobool");
415}
416
417/// EmitScalarConversion - Emit a conversion from the specified type to the
418/// specified destination type, both of which are LLVM scalar types.
419Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
420                                               QualType DstType) {
421  SrcType = CGF.getContext().getCanonicalType(SrcType);
422  DstType = CGF.getContext().getCanonicalType(DstType);
423  if (SrcType == DstType) return Src;
424
425  if (DstType->isVoidType()) return 0;
426
427  // Handle conversions to bool first, they are special: comparisons against 0.
428  if (DstType->isBooleanType())
429    return EmitConversionToBool(Src, SrcType);
430
431  const llvm::Type *DstTy = ConvertType(DstType);
432
433  // Ignore conversions like int -> uint.
434  if (Src->getType() == DstTy)
435    return Src;
436
437  // Handle pointer conversions next: pointers can only be converted
438  // to/from other pointers and integers. Check for pointer types in
439  // terms of LLVM, as some native types (like Obj-C id) may map to a
440  // pointer type.
441  if (isa<llvm::PointerType>(DstTy)) {
442    // The source value may be an integer, or a pointer.
443    if (isa<llvm::PointerType>(Src->getType())) {
444      // Some heavy lifting for derived to base conversion.
445      if (const CXXRecordDecl *ClassDecl =
446            SrcType->getCXXRecordDeclForPointerType())
447        if (const CXXRecordDecl *BaseClassDecl =
448              DstType->getCXXRecordDeclForPointerType())
449          Src = CGF.AddressCXXOfBaseClass(Src, ClassDecl, BaseClassDecl);
450      return Builder.CreateBitCast(Src, DstTy, "conv");
451    }
452    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
453    // First, convert to the correct width so that we control the kind of
454    // extension.
455    const llvm::Type *MiddleTy = VMContext.getIntegerType(CGF.LLVMPointerWidth);
456    bool InputSigned = SrcType->isSignedIntegerType();
457    llvm::Value* IntResult =
458        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
459    // Then, cast to pointer.
460    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
461  }
462
463  if (isa<llvm::PointerType>(Src->getType())) {
464    // Must be a ptr-to-int cast.
465    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
466    return Builder.CreatePtrToInt(Src, DstTy, "conv");
467  }
468
469  // A scalar can be splatted to an extended vector of the same element type
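  // (e.g. assigning a float to an OpenCL-style 'float4': the scalar is
  // converted to the element type, inserted into element 0 of an undef vector,
  // and then broadcast with a shufflevector whose mask is all zeros).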
470  if (DstType->isExtVectorType() && !isa<VectorType>(SrcType)) {
471    // Cast the scalar to element type
472    QualType EltTy = DstType->getAsExtVectorType()->getElementType();
473    llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
474
475    // Insert the element in element zero of an undef vector
476    llvm::Value *UnV = VMContext.getUndef(DstTy);
477    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
478    UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
479
480    // Splat the element across to all elements
481    llvm::SmallVector<llvm::Constant*, 16> Args;
482    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
483    for (unsigned i = 0; i < NumElements; i++)
484      Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
485
486    llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
487    llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
488    return Yay;
489  }
490
491  // Allow bitcast from vector to integer/fp of the same size.
492  if (isa<llvm::VectorType>(Src->getType()) ||
493      isa<llvm::VectorType>(DstTy))
494    return Builder.CreateBitCast(Src, DstTy, "conv");
495
496  // Finally, we have the arithmetic types: real int/float.
497  if (isa<llvm::IntegerType>(Src->getType())) {
498    bool InputSigned = SrcType->isSignedIntegerType();
499    if (isa<llvm::IntegerType>(DstTy))
500      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
501    else if (InputSigned)
502      return Builder.CreateSIToFP(Src, DstTy, "conv");
503    else
504      return Builder.CreateUIToFP(Src, DstTy, "conv");
505  }
506
507  assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
508  if (isa<llvm::IntegerType>(DstTy)) {
509    if (DstType->isSignedIntegerType())
510      return Builder.CreateFPToSI(Src, DstTy, "conv");
511    else
512      return Builder.CreateFPToUI(Src, DstTy, "conv");
513  }
514
515  assert(DstTy->isFloatingPoint() && "Unknown real conversion");
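  // LLVM orders the floating-point TypeIDs from smaller to larger types
  // (float, double, then the long-double variants), so a smaller destination
  // TypeID means this conversion is a truncation.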
516  if (DstTy->getTypeID() < Src->getType()->getTypeID())
517    return Builder.CreateFPTrunc(Src, DstTy, "conv");
518  else
519    return Builder.CreateFPExt(Src, DstTy, "conv");
520}
521
522/// EmitComplexToScalarConversion - Emit a conversion from the specified
523/// complex type to the specified destination type, where the destination
524/// type is an LLVM scalar type.
525Value *ScalarExprEmitter::
526EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
527                              QualType SrcTy, QualType DstTy) {
528  // Get the source element type.
529  SrcTy = SrcTy->getAsComplexType()->getElementType();
530
531  // Handle conversions to bool first, they are special: comparisons against 0.
532  if (DstTy->isBooleanType()) {
533    //  Complex != 0  -> (Real != 0) | (Imag != 0)
534    Src.first  = EmitScalarConversion(Src.first, SrcTy, DstTy);
535    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
536    return Builder.CreateOr(Src.first, Src.second, "tobool");
537  }
538
539  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
540  // the imaginary part of the complex value is discarded and the value of the
541  // real part is converted according to the conversion rules for the
542  // corresponding real type."
543  return EmitScalarConversion(Src.first, SrcTy, DstTy);
544}
545
546
547//===----------------------------------------------------------------------===//
548//                            Visitor Methods
549//===----------------------------------------------------------------------===//
550
551Value *ScalarExprEmitter::VisitExpr(Expr *E) {
552  CGF.ErrorUnsupported(E, "scalar expression");
553  if (E->getType()->isVoidType())
554    return 0;
555  return VMContext.getUndef(CGF.ConvertType(E->getType()));
556}
557
558Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
559  llvm::SmallVector<llvm::Constant*, 32> indices;
560  for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
561    indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
562  }
563  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
564  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
565  Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
566  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
567}
568
569Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
570  TestAndClearIgnoreResultAssign();
571
572  // Emit subscript expressions in rvalue contexts.  For most cases, this just
573  // loads the lvalue formed by the subscript expr.  However, we have to be
574  // careful, because the base of a vector subscript is occasionally an rvalue,
575  // so we can't get it as an lvalue.
576  if (!E->getBase()->getType()->isVectorType())
577    return EmitLoadOfLValue(E);
578
579  // Handle the vector case.  The base must be a vector, the index must be an
580  // integer value.
581  Value *Base = Visit(E->getBase());
582  Value *Idx  = Visit(E->getIdx());
583  bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
584  Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned,
585                              "vecidxcast");
586  return Builder.CreateExtractElement(Base, Idx, "vecext");
587}
588
589/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but
590/// also handle things like function to pointer-to-function decay, and array to
591/// pointer decay.
592Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) {
593  const Expr *Op = E->getSubExpr();
594
595  // If this is due to array->pointer conversion, emit the array expression as
596  // an l-value.
597  if (Op->getType()->isArrayType()) {
598    Value *V = EmitLValue(Op).getAddress();  // Bitfields can't be arrays.
599
600    // Note that VLA pointers are always decayed, so we don't need to do
601    // anything here.
602    if (!Op->getType()->isVariableArrayType()) {
603      assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
604      assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
605                                 ->getElementType()) &&
606             "Expected pointer to array");
607      V = Builder.CreateStructGEP(V, 0, "arraydecay");
608    }
609
610    // The resulting pointer type can be implicitly cast to other pointer
611    // types as well (e.g. void*) and can be implicitly converted to integer.
612    const llvm::Type *DestTy = ConvertType(E->getType());
613    if (V->getType() != DestTy) {
614      if (isa<llvm::PointerType>(DestTy))
615        V = Builder.CreateBitCast(V, DestTy, "ptrconv");
616      else {
617        assert(isa<llvm::IntegerType>(DestTy) && "Unknown array decay");
618        V = Builder.CreatePtrToInt(V, DestTy, "ptrconv");
619      }
620    }
621    return V;
622  }
623
624  return EmitCastExpr(Op, E->getType());
625}
626
627
628// EmitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
629// have to handle a broader range of conversions than explicit casts, as they
630// handle things like function to ptr-to-function decay etc.
631Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) {
632  if (!DestTy->isVoidType())
633    TestAndClearIgnoreResultAssign();
634
635  // Handle cases where the source is a non-complex type.
636
637  if (!CGF.hasAggregateLLVMType(E->getType())) {
638    Value *Src = Visit(const_cast<Expr*>(E));
639
640    // Use EmitScalarConversion to perform the conversion.
641    return EmitScalarConversion(Src, E->getType(), DestTy);
642  }
643
644  if (E->getType()->isAnyComplexType()) {
645    // Handle cases where the source is a complex type.
646    bool IgnoreImag = true;
647    bool IgnoreImagAssign = true;
648    bool IgnoreReal = IgnoreResultAssign;
649    bool IgnoreRealAssign = IgnoreResultAssign;
650    if (DestTy->isBooleanType())
651      IgnoreImagAssign = IgnoreImag = false;
652    else if (DestTy->isVoidType()) {
653      IgnoreReal = IgnoreImag = false;
654      IgnoreRealAssign = IgnoreImagAssign = true;
655    }
656    CodeGenFunction::ComplexPairTy V
657      = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
658                            IgnoreImagAssign);
659    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
660  }
661
662  // Okay, this is a cast from an aggregate.  It must be a cast to void.  Just
663  // evaluate the result and return.
664  CGF.EmitAggExpr(E, 0, false, true);
665  return 0;
666}
667
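/// VisitStmtExpr - Emit a GNU statement expression ({...}).  The value of the
/// expression is the value of its last statement, unless the whole expression
/// has void type.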
668Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
669  return CGF.EmitCompoundStmt(*E->getSubStmt(),
670                              !E->getType()->isVoidType()).getScalarVal();
671}
672
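/// VisitBlockDeclRefExpr - Load the value of a variable that is referenced
/// from within a block literal.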
673Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
674  return Builder.CreateLoad(CGF.GetAddrOfBlockDecl(E), false, "tmp");
675}
676
677//===----------------------------------------------------------------------===//
678//                             Unary Operators
679//===----------------------------------------------------------------------===//
680
681Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
682                                             bool isInc, bool isPre) {
683  LValue LV = EmitLValue(E->getSubExpr());
684  QualType ValTy = E->getSubExpr()->getType();
685  Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal();
686
687  int AmountVal = isInc ? 1 : -1;
688
689  if (ValTy->isPointerType() &&
690      ValTy->getAsPointerType()->isVariableArrayType()) {
691    // The amount of the addition/subtraction needs to account for the VLA size
692    CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
693  }
694
695  Value *NextVal;
696  if (const llvm::PointerType *PT =
697         dyn_cast<llvm::PointerType>(InVal->getType())) {
698    llvm::Constant *Inc =
699      llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal);
700    if (!isa<llvm::FunctionType>(PT->getElementType())) {
701      QualType PTEE = ValTy->getPointeeType();
702      if (const ObjCInterfaceType *OIT =
703          dyn_cast<ObjCInterfaceType>(PTEE)) {
704        // Handle interface types, which are not represented with a concrete type.
705        int size = CGF.getContext().getTypeSize(OIT) / 8;
706        if (!isInc)
707          size = -size;
708        Inc = llvm::ConstantInt::get(Inc->getType(), size);
709        const llvm::Type *i8Ty =
710          VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
711        InVal = Builder.CreateBitCast(InVal, i8Ty);
712        NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
713        llvm::Value *lhs = LV.getAddress();
714        lhs = Builder.CreateBitCast(lhs, VMContext.getPointerTypeUnqual(i8Ty));
715        LV = LValue::MakeAddr(lhs, ValTy.getCVRQualifiers(),
716                              CGF.getContext().getObjCGCAttrKind(ValTy));
717      }
718      else
719        NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
720    } else {
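      // GNU extension: arithmetic on a pointer to function is performed in
      // bytes, so bump the pointer through an i8* cast.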
721      const llvm::Type *i8Ty =
722        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
723      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
724      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
725      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
726    }
727  } else if (InVal->getType() == llvm::Type::Int1Ty && isInc) {
728    // Bool++ is an interesting case: due to promotion rules, we get:
729    // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
730    // Bool = ((int)Bool+1) != 0
731    // An interesting aspect of this is that increment is always true.
732    // Decrement does not have this property.
733    NextVal = VMContext.getTrue();
734  } else if (isa<llvm::IntegerType>(InVal->getType())) {
735    NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
736    NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
737  } else {
738    // Add the inc/dec amount to the floating-point operand.
739    if (InVal->getType() == llvm::Type::FloatTy)
740      NextVal =
741        llvm::ConstantFP::get(VMContext,
742                              llvm::APFloat(static_cast<float>(AmountVal)));
743    else if (InVal->getType() == llvm::Type::DoubleTy)
744      NextVal =
745        llvm::ConstantFP::get(VMContext,
746                              llvm::APFloat(static_cast<double>(AmountVal)));
747    else {
748      llvm::APFloat F(static_cast<float>(AmountVal));
749      bool ignored;
750      F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
751                &ignored);
752      NextVal = llvm::ConstantFP::get(VMContext, F);
753    }
754    NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
755  }
756
757  // Store the updated result through the lvalue.
758  if (LV.isBitfield())
759    CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy,
760                                       &NextVal);
761  else
762    CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
763
764  // If this is a postinc, return the value read from memory, otherwise use the
765  // updated value.
766  return isPre ? NextVal : InVal;
767}
768
769
770Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
771  TestAndClearIgnoreResultAssign();
772  Value *Op = Visit(E->getSubExpr());
773  if (Op->getType()->isFPOrFPVector())
774    return Builder.CreateFNeg(Op, "neg");
775  return Builder.CreateNeg(Op, "neg");
776}
777
778Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
779  TestAndClearIgnoreResultAssign();
780  Value *Op = Visit(E->getSubExpr());
781  return Builder.CreateNot(Op, "neg");
782}
783
784Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
785  // Compare operand to zero.
786  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
787
788  // Invert value.
789  // TODO: Could dynamically modify easy computations here.  For example, if
790  // the operand is an icmp ne, turn into icmp eq.
791  BoolVal = Builder.CreateNot(BoolVal, "lnot");
792
793  // ZExt result to the expr type.
794  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
795}
796
797/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of the
798/// argument of the sizeof/alignof expression as an integer.
799Value *
800ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
801  QualType TypeToSize = E->getTypeOfArgument();
802  if (E->isSizeOf()) {
803    if (const VariableArrayType *VAT =
804          CGF.getContext().getAsVariableArrayType(TypeToSize)) {
805      if (E->isArgumentType()) {
806        // sizeof(type) - make sure to emit the VLA size.
807        CGF.EmitVLASize(TypeToSize);
808      } else {
809        // C99 6.5.3.4p2: If the argument is an expression of VLA
810        // type, it is evaluated.
811        CGF.EmitAnyExpr(E->getArgumentExpr());
812      }
813
814      return CGF.GetVLASize(VAT);
815    }
816  }
817
818  // If this isn't sizeof(vla), the result must be constant; use the
819  // constant folding logic so we don't have to duplicate it here.
820  Expr::EvalResult Result;
821  E->Evaluate(Result, CGF.getContext());
822  return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
823}
824
825Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
826  Expr *Op = E->getSubExpr();
827  if (Op->getType()->isAnyComplexType())
828    return CGF.EmitComplexExpr(Op, false, true, false, true).first;
829  return Visit(Op);
830}
831Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
832  Expr *Op = E->getSubExpr();
833  if (Op->getType()->isAnyComplexType())
834    return CGF.EmitComplexExpr(Op, true, false, true, false).second;
835
836  // __imag on a scalar returns zero.  Emit the subexpr to ensure side
837  // effects are evaluated, but not the actual value.
838  if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
839    CGF.EmitLValue(Op);
840  else
841    CGF.EmitScalarExpr(Op, true);
842  return VMContext.getNullValue(ConvertType(E->getType()));
843}
844
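/// VisitUnaryOffsetOf - The operand is an l-value expression (typically built
/// over a null base pointer), so emitting offsetof amounts to taking its
/// address and converting that address to an integer of the result type.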
845Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E)
846{
847  Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
848  const llvm::Type* ResultType = ConvertType(E->getType());
849  return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
850}
851
852//===----------------------------------------------------------------------===//
853//                           Binary Operators
854//===----------------------------------------------------------------------===//
855
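/// EmitBinOps - Emit both operands of a binary operator and package them,
/// along with the computation type, into a BinOpInfo for the Emit* helpers.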
856BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
857  TestAndClearIgnoreResultAssign();
858  BinOpInfo Result;
859  Result.LHS = Visit(E->getLHS());
860  Result.RHS = Visit(E->getRHS());
861  Result.Ty  = E->getType();
862  Result.E = E;
863  return Result;
864}
865
866Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
867                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
868  bool Ignore = TestAndClearIgnoreResultAssign();
869  QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
870
871  BinOpInfo OpInfo;
872
873  if (E->getComputationResultType()->isAnyComplexType()) {
874    // This needs to go through the complex expression emitter, but
875    // it's a tad complicated to do that... I'm leaving it out for now.
876    // (Note that we do actually need the imaginary part of the RHS for
877    // multiplication and division.)
878    CGF.ErrorUnsupported(E, "complex compound assignment");
879    return VMContext.getUndef(CGF.ConvertType(E->getType()));
880  }
881
882  // Emit the RHS first.  __block variables need to have the rhs evaluated
883  // first, plus this should improve codegen a little.
884  OpInfo.RHS = Visit(E->getRHS());
885  OpInfo.Ty = E->getComputationResultType();
886  OpInfo.E = E;
887  // Load/convert the LHS.
888  LValue LHSLV = EmitLValue(E->getLHS());
889  OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
890  OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
891                                    E->getComputationLHSType());
892
893  // Expand the binary operator.
894  Value *Result = (this->*Func)(OpInfo);
895
896  // Convert the result back to the LHS type.
897  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
898
899  // Store the result value into the LHS lvalue. Bit-fields are
900  // handled specially because the result is altered by the store,
901  // i.e., [C99 6.5.16p1] 'An assignment expression has the value of
902  // the left operand after the assignment...'.
903  if (LHSLV.isBitfield()) {
904    if (!LHSLV.isVolatileQualified()) {
905      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
906                                         &Result);
907      return Result;
908    } else
909      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
910  } else
911    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
912  if (Ignore)
913    return 0;
914  return EmitLoadOfLValue(LHSLV, E->getType());
915}
916
917
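/// EmitDiv - Emit a division, selecting fdiv, udiv, or sdiv based on the
/// operand and computation types.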
918Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
919  if (Ops.LHS->getType()->isFPOrFPVector())
920    return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
921  else if (Ops.Ty->isUnsignedIntegerType())
922    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
923  else
924    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
925}
926
927Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
928  // Rem in C can't be a floating point type: C99 6.5.5p2.
929  if (Ops.Ty->isUnsignedIntegerType())
930    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
931  else
932    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
933}
934
935Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
936  unsigned IID;
937  unsigned OpID = 0;
938
939  switch (Ops.E->getOpcode()) {
940  case BinaryOperator::Add:
941  case BinaryOperator::AddAssign:
942    OpID = 1;
943    IID = llvm::Intrinsic::sadd_with_overflow;
944    break;
945  case BinaryOperator::Sub:
946  case BinaryOperator::SubAssign:
947    OpID = 2;
948    IID = llvm::Intrinsic::ssub_with_overflow;
949    break;
950  case BinaryOperator::Mul:
951  case BinaryOperator::MulAssign:
952    OpID = 3;
953    IID = llvm::Intrinsic::smul_with_overflow;
954    break;
955  default:
956    assert(false && "Unsupported operation for overflow detection");
957    IID = 0;
958  }
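  // Encode the operation for the runtime handler: the operation index is
  // shifted into the upper bits and the low bit is set (these checked
  // operations are all signed).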
959  OpID <<= 1;
960  OpID |= 1;
961
962  const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
963
964  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
965
966  Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
967  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
968  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
969
970  // Branch in case of overflow.
971  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
972  llvm::BasicBlock *overflowBB =
973    CGF.createBasicBlock("overflow", CGF.CurFn);
974  llvm::BasicBlock *continueBB =
975    CGF.createBasicBlock("overflow.continue", CGF.CurFn);
976
977  Builder.CreateCondBr(overflow, overflowBB, continueBB);
978
979  // Handle overflow
980
981  Builder.SetInsertPoint(overflowBB);
982
983  // The handler has the signature:
984  // long long (*__overflow_handler)(long long a, long long b, char op,
985  //                                 char width)
986  std::vector<const llvm::Type*> handlerArgTypes;
987  handlerArgTypes.push_back(llvm::Type::Int64Ty);
988  handlerArgTypes.push_back(llvm::Type::Int64Ty);
989  handlerArgTypes.push_back(llvm::Type::Int8Ty);
990  handlerArgTypes.push_back(llvm::Type::Int8Ty);
991  llvm::FunctionType *handlerTy = VMContext.getFunctionType(llvm::Type::Int64Ty,
992      handlerArgTypes, false);
993  llvm::Value *handlerFunction =
994    CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
995        VMContext.getPointerTypeUnqual(handlerTy));
996  handlerFunction = Builder.CreateLoad(handlerFunction);
997
998  llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
999      Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty),
1000      Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty),
1001      llvm::ConstantInt::get(llvm::Type::Int8Ty, OpID),
1002      llvm::ConstantInt::get(llvm::Type::Int8Ty,
1003        cast<llvm::IntegerType>(opTy)->getBitWidth()));
1004
1005  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
1006
1007  Builder.CreateBr(continueBB);
1008
1009  // Set up the continuation
1010  Builder.SetInsertPoint(continueBB);
1011  // Get the correct result
1012  llvm::PHINode *phi = Builder.CreatePHI(opTy);
1013  phi->reserveOperandSpace(2);
1014  phi->addIncoming(result, initialBB);
1015  phi->addIncoming(handlerResult, overflowBB);
1016
1017  return phi;
1018}
1019
1020Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
1021  if (!Ops.Ty->isAnyPointerType()) {
1022    if (CGF.getContext().getLangOptions().OverflowChecking &&
1023        Ops.Ty->isSignedIntegerType())
1024      return EmitOverflowCheckedBinOp(Ops);
1025
1026    if (Ops.LHS->getType()->isFPOrFPVector())
1027      return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
1028
1029    return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
1030  }
1031
1032  if (Ops.Ty->isPointerType() &&
1033      Ops.Ty->getAsPointerType()->isVariableArrayType()) {
1034    // The amount of the addition needs to account for the VLA size
1035    CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
1036  }
1037  Value *Ptr, *Idx;
1038  Expr *IdxExp;
1039  const PointerType *PT = Ops.E->getLHS()->getType()->getAsPointerType();
1040  const ObjCObjectPointerType *OPT =
1041    Ops.E->getLHS()->getType()->getAsObjCObjectPointerType();
1042  if (PT || OPT) {
1043    Ptr = Ops.LHS;
1044    Idx = Ops.RHS;
1045    IdxExp = Ops.E->getRHS();
1046  } else {  // int + pointer
1047    PT = Ops.E->getRHS()->getType()->getAsPointerType();
1048    OPT = Ops.E->getRHS()->getType()->getAsObjCObjectPointerType();
1049    assert((PT || OPT) && "Invalid add expr");
1050    Ptr = Ops.RHS;
1051    Idx = Ops.LHS;
1052    IdxExp = Ops.E->getLHS();
1053  }
1054
1055  unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
1056  if (Width < CGF.LLVMPointerWidth) {
1057    // Zero- or sign-extend the index value based on whether the index is
1058    // signed or not.
1059    const llvm::Type *IdxType = VMContext.getIntegerType(CGF.LLVMPointerWidth);
1060    if (IdxExp->getType()->isSignedIntegerType())
1061      Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
1062    else
1063      Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
1064  }
1065  const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
1066  // Handle interface types, which are not represented with a concrete
1067  // type.
1068  if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
1069    llvm::Value *InterfaceSize =
1070      llvm::ConstantInt::get(Idx->getType(),
1071                             CGF.getContext().getTypeSize(OIT) / 8);
1072    Idx = Builder.CreateMul(Idx, InterfaceSize);
1073    const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
1074    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
1075    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
1076    return Builder.CreateBitCast(Res, Ptr->getType());
1077  }
1078
1079  // Explicitly handle GNU void* and function pointer arithmetic
1080  // extensions. The GNU void* casts amount to no-ops since our void*
1081  // type is i8*, but this is future proof.
1082  if (ElementType->isVoidType() || ElementType->isFunctionType()) {
1083    const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
1084    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
1085    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
1086    return Builder.CreateBitCast(Res, Ptr->getType());
1087  }
1088
1089  return Builder.CreateGEP(Ptr, Idx, "add.ptr");
1090}
1091
1092Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
1093  if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
1094    if (CGF.getContext().getLangOptions().OverflowChecking
1095        && Ops.Ty->isSignedIntegerType())
1096      return EmitOverflowCheckedBinOp(Ops);
1097
1098    if (Ops.LHS->getType()->isFPOrFPVector())
1099      return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
1100    return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
1101  }
1102
1103  if (Ops.E->getLHS()->getType()->isPointerType() &&
1104      Ops.E->getLHS()->getType()->getAsPointerType()->isVariableArrayType()) {
1105    // The amount of the subtraction needs to account for the VLA size for
1106    // ptr - int;
1107    // the amount of the division needs to account for the VLA size for
1108    // ptr - ptr.
1109    CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
1110  }
1111
1112  const QualType LHSType = Ops.E->getLHS()->getType();
1113  const QualType LHSElementType = LHSType->getPointeeType();
1114  if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
1115    // pointer - int
1116    Value *Idx = Ops.RHS;
1117    unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
1118    if (Width < CGF.LLVMPointerWidth) {
1119      // Zero- or sign-extend the index value based on whether the index is
1120      // signed or not.
1121      const llvm::Type *IdxType =
1122        VMContext.getIntegerType(CGF.LLVMPointerWidth);
1123      if (Ops.E->getRHS()->getType()->isSignedIntegerType())
1124        Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
1125      else
1126        Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
1127    }
1128    Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
1129
1130    // Handle interface types, which are not represented with a concrete
1131    // type.
1132    if (const ObjCInterfaceType *OIT =
1133        dyn_cast<ObjCInterfaceType>(LHSElementType)) {
1134      llvm::Value *InterfaceSize =
1135        llvm::ConstantInt::get(Idx->getType(),
1136                               CGF.getContext().getTypeSize(OIT) / 8);
1137      Idx = Builder.CreateMul(Idx, InterfaceSize);
1138      const llvm::Type *i8Ty =
1139        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
1140      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
1141      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
1142      return Builder.CreateBitCast(Res, Ops.LHS->getType());
1143    }
1144
1145    // Explicitly handle GNU void* and function pointer arithmetic
1146    // extensions. The GNU void* casts amount to no-ops since our
1147    // void* type is i8*, but this is future proof.
1148    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
1149      const llvm::Type *i8Ty =
1150        VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
1151      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
1152      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
1153      return Builder.CreateBitCast(Res, Ops.LHS->getType());
1154    }
1155
1156    return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
1157  } else {
1158    // pointer - pointer
1159    Value *LHS = Ops.LHS;
1160    Value *RHS = Ops.RHS;
1161
1162    uint64_t ElementSize;
1163
1164    // Handle GCC extension for pointer arithmetic on void* and function pointer
1165    // types.
1166    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
1167      ElementSize = 1;
1168    } else {
1169      ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
1170    }
1171
1172    const llvm::Type *ResultType = ConvertType(Ops.Ty);
1173    LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
1174    RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1175    Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
1176
1177    // Optimize out the shift for element size of 1.
1178    if (ElementSize == 1)
1179      return BytesBetween;
1180
1181    // HACK: LLVM doesn't have a divide instruction that 'knows' there is no
1182    // remainder.  As such, we handle common power-of-two cases here to generate
1183    // better code. See PR2247.
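    // For example, on a target with 4-byte ints, subtracting two 'int*' values
    // becomes an 'ashr' of the byte difference by 2 instead of an sdiv.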
1184    if (llvm::isPowerOf2_64(ElementSize)) {
1185      Value *ShAmt =
1186        llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize));
1187      return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
1188    }
1189
1190    // Otherwise, do a full sdiv.
1191    Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
1192    return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
1193  }
1194}
1195
1196Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
1197  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
1198  // RHS to the same size as the LHS.
1199  Value *RHS = Ops.RHS;
1200  if (Ops.LHS->getType() != RHS->getType())
1201    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
1202
1203  return Builder.CreateShl(Ops.LHS, RHS, "shl");
1204}
1205
1206Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
1207  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
1208  // RHS to the same size as the LHS.
1209  Value *RHS = Ops.RHS;
1210  if (Ops.LHS->getType() != RHS->getType())
1211    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
1212
1213  if (Ops.Ty->isUnsignedIntegerType())
1214    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
1215  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
1216}
1217
1218Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
1219                                      unsigned SICmpOpc, unsigned FCmpOpc) {
1220  TestAndClearIgnoreResultAssign();
1221  Value *Result;
1222  QualType LHSTy = E->getLHS()->getType();
1223  if (!LHSTy->isAnyComplexType()) {
1224    Value *LHS = Visit(E->getLHS());
1225    Value *RHS = Visit(E->getRHS());
1226
1227    if (LHS->getType()->isFPOrFPVector()) {
1228      Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
1229                                  LHS, RHS, "cmp");
1230    } else if (LHSTy->isSignedIntegerType()) {
1231      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
1232                                  LHS, RHS, "cmp");
1233    } else {
1234      // Unsigned integers and pointers.
1235      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
1236                                  LHS, RHS, "cmp");
1237    }
1238
1239    // If this is a vector comparison, sign extend the result to the appropriate
1240    // vector integer type and return it (don't convert to bool).
1241    if (LHSTy->isVectorType())
1242      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
1243
1244  } else {
1245    // Complex Comparison: can only be an equality comparison.
1246    CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
1247    CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
1248
1249    QualType CETy = LHSTy->getAsComplexType()->getElementType();
1250
1251    Value *ResultR, *ResultI;
1252    if (CETy->isRealFloatingType()) {
1253      ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
1254                                   LHS.first, RHS.first, "cmp.r");
1255      ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
1256                                   LHS.second, RHS.second, "cmp.i");
1257    } else {
1258      // Complex comparisons can only be equality comparisons.  As such, signed
1259      // and unsigned opcodes are the same.
1260      ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
1261                                   LHS.first, RHS.first, "cmp.r");
1262      ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
1263                                   LHS.second, RHS.second, "cmp.i");
1264    }
1265
1266    if (E->getOpcode() == BinaryOperator::EQ) {
1267      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
1268    } else {
1269      assert(E->getOpcode() == BinaryOperator::NE &&
1270             "Complex comparison other than == or != ?");
1271      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
1272    }
1273  }
1274
1275  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
1276}
1277
1278Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1279  bool Ignore = TestAndClearIgnoreResultAssign();
1280
1281  // __block variables need to have the rhs evaluated first, plus this should
1282  // improve codegen just a little.
1283  Value *RHS = Visit(E->getRHS());
1284  LValue LHS = EmitLValue(E->getLHS());
1285
1286  // Store the value into the LHS.  Bit-fields are handled specially
1287  // because the result is altered by the store, i.e., [C99 6.5.16p1]
1288  // 'An assignment expression has the value of the left operand after
1289  // the assignment...'.
1290  if (LHS.isBitfield()) {
1291    if (!LHS.isVolatileQualified()) {
1292      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
1293                                         &RHS);
1294      return RHS;
1295    } else
1296      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
1297  } else
1298    CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
1299  if (Ignore)
1300    return 0;
1301  return EmitLoadOfLValue(LHS, E->getType());
1302}
1303
1304Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
1305  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
1306  // If we have 1 && X, just emit X without inserting the control flow.
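  // ConstantFoldsToSimpleInteger returns 0 when the LHS cannot be folded to a
  // constant, +1 when it folds to true, and -1 when it folds to false.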
1307  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
1308    if (Cond == 1) { // If we have 1 && X, just emit X.
1309      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
1310      // ZExt result to int.
1311      return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "land.ext");
1312    }
1313
1314    // 0 && RHS: If it is safe, just elide the RHS, and return 0.
1315    if (!CGF.ContainsLabel(E->getRHS()))
1316      return VMContext.getNullValue(CGF.LLVMIntTy);
1317  }
1318
1319  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
1320  llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");
1321
1322  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
1323  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
1324
1325  // Any edges into the ContBlock are now from an (indeterminate number of)
1326  // branches out of this first condition.  All of these values will be false.
1327  // Start setting up the PHI node in the Cont Block for this.
1328  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
1329  PN->reserveOperandSpace(2);  // Normal case, two inputs.
1330  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
1331       PI != PE; ++PI)
1332    PN->addIncoming(VMContext.getFalse(), *PI);
1333
1334  CGF.PushConditionalTempDestruction();
1335  CGF.EmitBlock(RHSBlock);
1336  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
1337  CGF.PopConditionalTempDestruction();
1338
1339  // Reacquire the RHS block, as there may be subblocks inserted.
1340  RHSBlock = Builder.GetInsertBlock();
1341
1342  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
1343  // into the phi node for the edge with the value of RHSCond.
1344  CGF.EmitBlock(ContBlock);
1345  PN->addIncoming(RHSCond, RHSBlock);
1346
1347  // ZExt result to int.
1348  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext");
1349}
1350
1351Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
1352  // If we have "1 || RHS", see if we can elide the RHS; if so, return 1.
1353  // If we have "0 || X", just emit X without inserting the control flow.
1354  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
1355    if (Cond == -1) { // If we have 0 || X, just emit X.
1356      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
1357      // ZExt result to int.
1358      return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "lor.ext");
1359    }
1360
1361    // 1 || RHS: If it is safe, just elide the RHS, and return 1.
1362    if (!CGF.ContainsLabel(E->getRHS()))
1363      return llvm::ConstantInt::get(CGF.LLVMIntTy, 1);
1364  }
1365
1366  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
1367  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
1368
1369  // Branch on the LHS first.  If it is true, go to the success (cont) block.
1370  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
1371
1372  // Any edges into the ContBlock now come from an (indeterminate number of)
1373  // branches on this first condition, and all of those values will be true.
1374  // Start setting up the PHI node in the Cont Block for this.
1375  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
1376  PN->reserveOperandSpace(2);  // Normal case, two inputs.
1377  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
1378       PI != PE; ++PI)
1379    PN->addIncoming(VMContext.getTrue(), *PI);
1380
1381  CGF.PushConditionalTempDestruction();
1382
1383  // Emit the RHS condition as a bool value.
1384  CGF.EmitBlock(RHSBlock);
1385  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
1386
1387  CGF.PopConditionalTempDestruction();
1388
1389  // Reacquire the RHS block, as there may be subblocks inserted.
1390  RHSBlock = Builder.GetInsertBlock();
1391
1392  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
1393  // into the phi node for the edge with the value of RHSCond.
1394  CGF.EmitBlock(ContBlock);
1395  PN->addIncoming(RHSCond, RHSBlock);
1396
1397  // ZExt result to int.
1398  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext");
1399}
1400
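/// VisitBinComma - Emit the comma operator: the LHS is emitted only for its
/// side effects (EnsureInsertPoint recovers if the LHS terminated the current
/// block), and the value of the RHS is returned.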
1401Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
1402  CGF.EmitStmt(E->getLHS());
1403  CGF.EnsureInsertPoint();
1404  return Visit(E->getRHS());
1405}
1406
1407//===----------------------------------------------------------------------===//
1408//                             Other Operators
1409//===----------------------------------------------------------------------===//
1410
1411/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
1412/// expression is cheap enough and side-effect-free enough to evaluate
1413/// unconditionally instead of conditionally.  This is used to convert control
1414/// flow into selects in some cases.
1415static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
1416  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
1417    return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr());
1418
1419  // TODO: Allow anything we can constant fold to an integer or fp constant.
1420  if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
1421      isa<FloatingLiteral>(E))
1422    return true;
1423
1424  // Non-volatile automatic variables are cheap enough too, so that
1425  // "cond ? X : Y" where X and Y are local variables becomes a select.
1426  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
1427    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
1428      if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified())
1429        return true;
1430
1431  return false;
1432}
1433
1434
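/// VisitConditionalOperator - Emit "Cond ? LHS : RHS", including the GNU
/// "Cond ?: RHS" form.  A constant-foldable condition lets the dead arm be
/// skipped, cheap side-effect-free operands are lowered to a select, and the
/// general case emits explicit control flow joined by a PHI node.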
1435Value *ScalarExprEmitter::
1436VisitConditionalOperator(const ConditionalOperator *E) {
1437  TestAndClearIgnoreResultAssign();
1438  // If the condition constant folds and can be elided, try to avoid emitting
1439  // the condition and the dead arm.
1440  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
1441    Expr *Live = E->getLHS(), *Dead = E->getRHS();
1442    if (Cond == -1)
1443      std::swap(Live, Dead);
1444
1445    // If the dead side doesn't have labels we need, and if the Live side isn't
1446    // the GNU missing ?: extension (which we could handle, but don't bother
1447    // to), just emit the Live part.
1448    if ((!Dead || !CGF.ContainsLabel(Dead)) &&  // No labels in dead part
1449        Live)                                   // Live part isn't missing.
1450      return Visit(Live);
1451  }
1452
1453
1454  // If this is a really simple expression (like x ? 4 : 5), emit this as a
1455  // select instead of as control flow.  We can only do this if it is cheap and
1456  // safe to evaluate the LHS and RHS unconditionally.
1457  if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS()) &&
1458      isCheapEnoughToEvaluateUnconditionally(E->getRHS())) {
1459    llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
1460    llvm::Value *LHS = Visit(E->getLHS());
1461    llvm::Value *RHS = Visit(E->getRHS());
1462    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
1463  }
1464
1465
1466  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1467  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1468  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
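  // CondVal is only needed for the GNU ?: extension (missing LHS).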
1469  Value *CondVal = 0;
1470
1471  // If we don't have the GNU missing condition extension, emit a branch on
1472  // bool the normal way.
1473  if (E->getLHS()) {
1474    // Just use EmitBranchOnBoolExpr to get small and simple code for the
1475    // branch on bool.
1476    CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
1477  } else {
1478    // Otherwise, for the ?: extension, evaluate the conditional and then
1479    // convert it to bool the hard way.  We do this explicitly because we need
1480    // the unconverted value for the missing middle value of the ?:.
1481    CondVal = CGF.EmitScalarExpr(E->getCond());
1482
1483    // In some cases, EmitScalarConversion will delete the "CondVal" expression
1484    // if there are no extra uses (an optimization).  Inhibit this by making an
1485    // extra dead use, because we're going to add a use of CondVal later.  We
1486    // don't use the builder for this, because we don't want it to get optimized
1487    // away.  This leaves dead code, but the ?: extension isn't common.
1488    new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
1489                          Builder.GetInsertBlock());
1490
1491    Value *CondBoolVal =
1492      CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
1493                               CGF.getContext().BoolTy);
1494    Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
1495  }
1496
1497  CGF.PushConditionalTempDestruction();
1498  CGF.EmitBlock(LHSBlock);
1499
1500  // Handle the GNU extension for missing LHS.
1501  Value *LHS;
1502  if (E->getLHS())
1503    LHS = Visit(E->getLHS());
1504  else    // Perform promotions, to handle cases like "short ?: int"
1505    LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
1506
1507  CGF.PopConditionalTempDestruction();
1508  LHSBlock = Builder.GetInsertBlock();
1509  CGF.EmitBranch(ContBlock);
1510
1511  CGF.PushConditionalTempDestruction();
1512  CGF.EmitBlock(RHSBlock);
1513
1514  Value *RHS = Visit(E->getRHS());
1515  CGF.PopConditionalTempDestruction();
1516  RHSBlock = Builder.GetInsertBlock();
1517  CGF.EmitBranch(ContBlock);
1518
1519  CGF.EmitBlock(ContBlock);
1520
1521  if (!LHS || !RHS) {
1522    assert(E->getType()->isVoidType() && "Non-void expr should have a value");
1523    return 0;
1524  }
1525
1526  // Create a PHI node to merge the results of the two arms.
1527  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
1528  PN->reserveOperandSpace(2);
1529  PN->addIncoming(LHS, LHSBlock);
1530  PN->addIncoming(RHS, RHSBlock);
1531  return PN;
1532}
1533
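/// VisitChooseExpr - __builtin_choose_expr is resolved at compile time, so
/// just emit whichever sub-expression was chosen.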
1534Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
1535  return Visit(E->getChosenSubExpr(CGF.getContext()));
1536}
1537
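/// VisitVAArgExpr - Emit a va_arg read.  The target-specific EmitVAArg hook
/// is tried first; if it declines (returns null), fall back to the generic
/// LLVM va_arg instruction.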
1538Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1539  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
1540  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
1541
1542  // If EmitVAArg fails, we fall back to the LLVM instruction.
1543  if (!ArgPtr)
1544    return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
1545
1546  // FIXME: Honor volatility on this load.
1547  return Builder.CreateLoad(ArgPtr);
1548}
1549
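/// VisitBlockExpr - A block literal is lowered by BuildBlockLiteralTmp, which
/// materializes the block object and yields it as a scalar value.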
1550Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
1551  return CGF.BuildBlockLiteralTmp(BE);
1552}
1553
1554//===----------------------------------------------------------------------===//
1555//                         Entry Point into this File
1556//===----------------------------------------------------------------------===//
1557
1558/// EmitScalarExpr - Emit the computation of the specified expression of
1559/// scalar type and return the result as an LLVM scalar value.
1560Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
1561  assert(E && !hasAggregateLLVMType(E->getType()) &&
1562         "Invalid scalar expression to emit");
1563
1564  return ScalarExprEmitter(*this, IgnoreResultAssign)
1565    .Visit(const_cast<Expr*>(E));
1566}
1567
1568/// EmitScalarConversion - Emit a conversion from the specified type to the
1569/// specified destination type, both of which are LLVM scalar types.
1570Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
1571                                             QualType DstTy) {
1572  assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
1573         "Invalid scalar expression to emit");
1574  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
1575}
1576
1577/// EmitComplexToScalarConversion - Emit a conversion from the specified
1578/// complex type to the specified destination type, where the destination
1579/// type is an LLVM scalar type.
1580Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
1581                                                      QualType SrcTy,
1582                                                      QualType DstTy) {
1583  assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
1584         "Invalid complex -> scalar conversion");
1585  return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
1586                                                                DstTy);
1587}
1588
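/// EmitShuffleVector - Build a shufflevector of V1 and V2.  The variadic
/// arguments are one int index per result element (indices in the range
/// [0, 2*NumElements) select from the concatenation of V1 and V2), followed
/// by a const char* name for the instruction.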
1589Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
1590  assert(V1->getType() == V2->getType() &&
1591         "Vector operands must be of the same type");
1592  unsigned NumElements =
1593    cast<llvm::VectorType>(V1->getType())->getNumElements();
1594
1595  va_list va;
1596  va_start(va, V2);
1597
1598  llvm::SmallVector<llvm::Constant*, 16> Args;
1599  for (unsigned i = 0; i < NumElements; i++) {
1600    int n = va_arg(va, int);
1601    assert(n >= 0 && n < (int)NumElements * 2 &&
1602           "Vector shuffle index out of bounds!");
1603    Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n));
1604  }
1605
1606  const char *Name = va_arg(va, const char *);
1607  va_end(va);
1608
1609  llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
1610
1611  return Builder.CreateShuffleVector(V1, V2, Mask, Name);
1612}
1613
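/// EmitVector - Build a vector value from NumVals scalars by repeatedly
/// inserting elements into an undef vector.  If isSplat is set, Vals[0] is
/// broadcast into every element.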
1614llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
1615                                         unsigned NumVals, bool isSplat) {
1616  llvm::Value *Vec
1617    = VMContext.getUndef(VMContext.getVectorType(Vals[0]->getType(), NumVals));
1618
1619  for (unsigned i = 0, e = NumVals; i != e; ++i) {
1620    llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
1621    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
1622    Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
1623  }
1624
1625  return Vec;
1626}
1627