// CGExprScalar.cpp revision f3ea8cfe6b1c2ef0702efe130561e9e66708d799
//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Support/CFG.h"
#include "llvm/Target/TargetData.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// BinOpInfo - Bundles the pre-emitted operands of a binary operation together
/// with the type the computation is performed in and the originating AST node
/// (kept for diagnostics).
struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  const Expr *E;      // Entire expr, for error unsupported.  May not be binop.
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// ScalarExprEmitter - A StmtVisitor that emits LLVM IR for an expression
/// whose value is a scalar LLVM type, returning the llvm::Value* holding the
/// result (or null for void-typed constructs such as throw/delete).
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  // One-shot flag; consumed and reset by TestAndClearIgnoreResultAssign().
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// Returns the current value of IgnoreResultAssign and clears it, so the
  /// flag only affects the first consumer that asks.
  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }

  Value *EmitLoadOfLValue(LValue LV, QualType T) {
    return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    return EmitLoadOfLValue(EmitCheckedLValue(E), E->getType());
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// EmitScalarConversion - Emit a conversion from the specified type to the
  /// specified destination type, both of which are LLVM scalar types.
  Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);

  /// EmitComplexToScalarConversion - Emit a conversion from the specified
  /// complex type to the specified destination type, where the destination type
  /// is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    // If this expression was already emitted and saved on behalf of a
    // conditionally-executed context, reuse the saved value rather than
    // re-emitting it.
    llvm::DenseMap<const Expr *, llvm::Value *>::iterator I =
      CGF.ConditionalSaveExprs.find(E);
    if (I != CGF.ConditionalSaveExprs.end())
      return I->second;

    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(CGF.getContext().getSourceManager());
    assert(0 && "Stmt can't have complex result type!");
    return 0;
  }
  Value *VisitExpr(Expr *S);

  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return llvm::ConstantInt::get(VMContext, E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
    // __builtin_types_compatible_p folds to a constant 0/1.
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  CGF.getContext().typesAreCompatible(
                                    E->getArgType1(), E->getArgType2()));
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    // Try to fold the reference to a constant (e.g. an enumerator or a
    // const-qualified global); otherwise fall back to a load.
    Expr::EvalResult Result;
    if (!E->Evaluate(Result, CGF.getContext()))
      return EmitLoadOfLValue(E);

    assert(!Result.HasSideEffects && "Constant declref with side-effect?!");

    llvm::Constant *C;
    if (Result.Val.isInt()) {
      C = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
    } else if (Result.Val.isFloat()) {
      C = llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
    } else {
      return EmitLoadOfLValue(E);
    }

    // Make sure we emit a debug reference to the global variable.
    if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
      if (!CGF.getContext().DeclMustBeEmitted(VD))
        CGF.EmitDeclRefExprDbgValue(E, C);
    } else if (isa<EnumConstantDecl>(E->getDecl())) {
      CGF.EmitDeclRefExprDbgValue(E, C);
    }

    return C;
  }
  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCImplicitSetterGetterRefExpr(
                        ObjCImplicitSetterGetterRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
    return V;
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return CGF.CGM.EmitNullConstant(E->getType());
  }
  Value *VisitCastExpr(CastExpr *E) {
    // Make sure to evaluate VLA bounds now so that we have them for later.
    if (E->getType()->isVariablyModifiedType())
      CGF.EmitVLASize(E->getType());

    return EmitCastExpr(E);
  }
  Value *EmitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    // A call returning a reference yields an l-value; load through it.
    if (E->getCallReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);

    return CGF.EmitCallExpr(E).getScalarVal();
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    // If the sub-expression is an instance member reference,
    // EmitDeclRefLValue will magically emit it with the appropriate
    // value as the "address".
    return EmitLValue(E->getSubExpr()).getAddress();
  }
  Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
    return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
  }
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return 0;
  }
  Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return 0;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return 0;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->hasSignedIntegerRepresentation()) {
      switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Undefined:
        // Overflow is UB: emit an NSW mul so the optimizer may exploit it.
        return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Defined:
        // Overflow is defined (wraps): plain two's-complement mul.
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Trapping:
        // -ftrapv: emit an overflow-checked multiply that traps.
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.LHS->getType()->isFPOrFPVectorTy())
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  bool isTrapvOverflowBehavior() {
    return CGF.getContext().getLangOptions().getSignedOverflowBehavior()
               == LangOptions::SOB_Trapping;
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
  // Emit the overflow BB when -ftrapv option is activated.
  void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
    Builder.SetInsertPoint(overflowBB);
    llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
    Builder.CreateCall(Trap);
    Builder.CreateUnreachable();
  }
  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,
                                                  bool isDiv);
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
    return Emit ## OP(EmitBinOps(E));                                      \
  }                                                                        \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
                     unsigned SICmpOpc, unsigned FCmpOpc);
#define VISITCOMP(CODE, UI, SI, FP) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitConditionalOperator(const ConditionalOperator *CO);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value.  This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType()) {
    // Compare against 0.0 for fp scalars.
    // Note: UNE, so NaN converts to true, matching "Val != 0" semantics.
    llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
    return Builder.CreateFCmpUNE(Src, Zero, "tobool");
  }

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  // Because of the type rules of C, we often end up computing a logical value,
  // then zero extending it to int, then wanting it as a logical value again.
  // Optimize this common case.
  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
    if (ZI->getOperand(0)->getType() ==
        llvm::Type::getInt1Ty(CGF.getLLVMContext())) {
      Value *Result = ZI->getOperand(0);
      // If there aren't any more uses, zap the instruction to save space.
      // Note that there can be more uses, for example if this
      // is the result of an assignment.
      if (ZI->use_empty())
        ZI->eraseFromParent();
      return Result;
    }
  }

  // Compare against an integer or pointer null.
  llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
  return Builder.CreateICmpNE(Src, Zero, "tobool");
}

/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType) {
  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return 0;

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  const llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (Src->getType() == DstTy)
    return Src;

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (isa<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(Src->getType()))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    const llvm::Type *MiddleTy = CGF.IntPtrTy;
    bool InputSigned = SrcType->isSignedIntegerType();
    llvm::Value* IntResult =
      Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(Src->getType())) {
    // Must be an ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Cast the scalar to element type
    QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
    llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);

    // Insert the element in element zero of an undef vector
    llvm::Value *UnV = llvm::UndefValue::get(DstTy);
    llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
    UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");

    // Splat the element across to all elements: shuffle with an all-zeros
    // mask so every result lane reads lane 0 of UnV.
    llvm::SmallVector<llvm::Constant*, 16> Args;
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    for (unsigned i = 0; i < NumElements; i++)
      Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));

    llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
    llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
    return Yay;
  }

  // Allow bitcast from vector to integer/fp of the same size.
  if (isa<llvm::VectorType>(Src->getType()) ||
      isa<llvm::VectorType>(DstTy))
    return Builder.CreateBitCast(Src, DstTy, "conv");

  // Finally, we have the arithmetic types: real int/float.
  if (isa<llvm::IntegerType>(Src->getType())) {
    bool InputSigned = SrcType->isSignedIntegerType();
    if (isa<llvm::IntegerType>(DstTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  assert(Src->getType()->isFloatingPointTy() && "Unknown real conversion");
  if (isa<llvm::IntegerType>(DstTy)) {
    if (DstType->isSignedIntegerType())
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  assert(DstTy->isFloatingPointTy() && "Unknown real conversion");
  // NOTE(review): this relies on FP TypeIDs being ordered by increasing
  // precision (float < double < long-double variants) to pick trunc vs ext.
  if (DstTy->getTypeID() < Src->getType()->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  else
    return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
/// type to the specified destination type, where the destination type is an
/// LLVM scalar type.
Value *ScalarExprEmitter::
EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                              QualType SrcTy, QualType DstTy) {
  // Get the source element type.
  SrcTy = SrcTy->getAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0  -> (Real != 0) | (Imag != 0)
    Src.first  = EmitScalarConversion(Src.first, SrcTy, DstTy);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type.
  return EmitScalarConversion(Src.first, SrcTy, DstTy);
}

/// EmitNullValue - Emit a value that corresponds to null for the given type;
/// member pointers get the ABI's null representation, everything else the
/// LLVM zero value.
Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);

  return llvm::Constant::getNullValue(ConvertType(Ty));
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  // Fallback for expression kinds this emitter does not handle: report and
  // produce an undef of the right type so codegen can continue.
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return 0;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}

Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case
  if (E->getNumSubExprs() == 2 ||
      (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    if (E->getNumSubExprs() == 3) {
      Mask = CGF.EmitScalarExpr(E->getExpr(2));

      // Shuffle LHS & RHS into one input vector.
      llvm::SmallVector<llvm::Constant*, 32> concat;
      for (unsigned i = 0; i != LHSElts; ++i) {
        concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i));
        concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
      }

      Value* CV = llvm::ConstantVector::get(concat.begin(), concat.size());
      LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
      LHSElts *= 2;
    } else {
      Mask = RHS;
    }

    const llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
    llvm::Constant* EltMask;

    // Treat vec3 like vec4.
    // Pick a power-of-two bitmask wide enough to cover all valid lane
    // indices; vec3 inputs are rounded up to vec4 index space.
    if ((LHSElts == 6) && (E->getNumSubExprs() == 3))
      EltMask = llvm::ConstantInt::get(MTy->getElementType(),
                                       (1 << llvm::Log2_32(LHSElts+2))-1);
    else if ((LHSElts == 3) && (E->getNumSubExprs() == 2))
      EltMask = llvm::ConstantInt::get(MTy->getElementType(),
                                       (1 << llvm::Log2_32(LHSElts+1))-1);
    else
      EltMask = llvm::ConstantInt::get(MTy->getElementType(),
                                       (1 << llvm::Log2_32(LHSElts))-1);

    // Mask off the high bits of each shuffle index.
    llvm::SmallVector<llvm::Constant *, 32> MaskV;
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
      MaskV.push_back(EltMask);

    Value* MaskBits = llvm::ConstantVector::get(MaskV.begin(), MaskV.size());
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    const llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
                                                        MTy->getNumElements());
    Value* NewV = llvm::UndefValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *Indx = llvm::ConstantInt::get(CGF.Int32Ty, i);
      Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
      Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");

      // Handle vec3 special since the index will be off by one for the RHS.
      // Indices > 3 refer into the second vec3, which occupies lanes 3..5 of
      // the concatenated vector rather than 4..7, so shift them down by one.
      if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
        Value *cmpIndx, *newIndx;
        cmpIndx = Builder.CreateICmpUGT(Indx,
                                        llvm::ConstantInt::get(CGF.Int32Ty, 3),
                                        "cmp_shuf_idx");
        newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(CGF.Int32Ty,1),
                                    "shuf_idx_adj");
        Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
      }
      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, Indx, "shuf_ins");
    }
    return NewV;
  }

  // Constant-mask case: the remaining sub-expressions are constant lane
  // indices that become the shuffle mask directly.
  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));

  // Handle vec3 special since the index will be off by one for the RHS.
  llvm::SmallVector<llvm::Constant*, 32> indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
    llvm::Constant *C = cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i)));
    const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
    if (VTy->getNumElements() == 3) {
      if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
        uint64_t cVal = CI->getZExtValue();
        if (cVal > 3) {
          C = llvm::ConstantInt::get(C->getType(), cVal-1);
        }
      }
    }
    indices.push_back(C);
  }

  Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  // If the member reference folds to an integer constant, still evaluate the
  // base (for side effects), then return the constant.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
    if (E->isArrow())
      CGF.EmitScalarExpr(E->getBase());
    else
      EmitLValue(E->getBase());
    return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
  }

  // Emit debug info for aggregate now, if it was delayed to reduce
  // debug info size.
  CGDebugInfo *DI = CGF.getDebugInfo();
  if (DI && CGF.CGM.getCodeGenOpts().LimitDebugInfo) {
    QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
      if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
        DI->getOrCreateRecordType(PTy->getPointeeType(),
                                  M->getParent()->getLocation());
  }
  return EmitLoadOfLValue(E);
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's.  For most cases, this just
  // loads the lvalue formed by the subscript expr.  However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType())
    return EmitLoadOfLValue(E);

  // Handle the vector case.  The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx  = Visit(E->getIdx());
  bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
  Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

/// getMaskElt - Read lane Idx of SVI's shuffle mask and return it (biased by
/// Off) as an i32 constant; a mask value of -1 denotes an undef lane.
static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
                                  unsigned Off, const llvm::Type *I32Ty) {
  int MV = SVI->getMaskValue(Idx);
  if (MV == -1)
    return llvm::UndefValue::get(I32Ty);
  return llvm::ConstantInt::get(I32Ty, Off+MV);
}

Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert (Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  const llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  // We have a scalar in braces.  Just use the first element.
  if (!VType)
    return Visit(E->getInit(0));

  unsigned ResElts = VType->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr).  This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsUndefShuffle = false;
  llvm::Value *V = llvm::UndefValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    llvm::SmallVector<llvm::Constant*, 16> Args;

    const llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements.  If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (EI->getVectorOperandType()->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = 0, *RHS = 0;
          if (CurIdx == 0) {
            // insert into undef -> shuffle (src, undef)
            Args.push_back(C);
            for (unsigned j = 1; j != ResElts; ++j)
              Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsUndefShuffle = true;
          } else if (VIsUndefShuffle) {
            // insert into undefshuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
            Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
                                                  ResElts + C->getZExtValue()));
            for (unsigned j = CurIdx + 1; j != ResElts; ++j)
              Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsUndefShuffle = false;
          }
          if (!Args.empty()) {
            llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
            V = Builder.CreateShuffleVector(LHS, RHS, Mask);
            ++CurIdx;
            continue;
          }
        }
      }
      // Plain scalar element: fall back to insertelement at the current lane.
      Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
      V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
      VIsUndefShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = VVT->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with undef, merge
          // this shuffle directly into it.
          if (VIsUndefShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
                                      CGF.Int32Ty));
          } else {
            Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
        for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
          Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));

        if (VIsUndefShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
      for (unsigned j = InitElts; j != ResElts; ++j)
        Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
      llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
      Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
                                         Mask, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j+Offset));
      for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
        Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
    }

    // If V is undef, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
    V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
    VIsUndefShuffle = isa<llvm::UndefValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  //        Emit remaining default initializers.
  const llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}

static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E)) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->getValueKind() != VK_RValue)
      return false;
  }

  return true;
}

// VisitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
// have to handle a more broad range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();

  if (!DestTy->isVoidType())
    TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case.  The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Unknown:
    // FIXME: All casts should have a known kind!
    //assert(0 && "Unknown cast kind!");
    break;

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    // Reinterpret the address of the lvalue as a pointer to the destination
    // type, then load through it.
    Value *V = EmitLValue(E).getAddress();
    V = Builder.CreateBitCast(V,
                          ConvertType(CGF.getContext().getPointerType(DestTy)));
    return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), DestTy);
  }

  case CK_AnyPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(const_cast<Expr*>(E));
    return Builder.CreateBitCast(Src, ConvertType(DestTy));
  }
  case CK_NoOp:
  case CK_UserDefinedConversion:
    return Visit(const_cast<Expr*>(E));

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl =
      DestTy->getCXXRecordDeclForPointerType();

    return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
                                        CE->path_begin(), CE->path_end(),
                                        ShouldNullCheckClassCastValue(CE));
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
                                     CE->path_begin(), CE->path_end(),
                                     ShouldNullCheckClassCastValue(CE));
  }
  case CK_Dynamic: {
    Value *V = Visit(const_cast<Expr*>(E));
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }
  case CK_ToUnion:
    assert(0 && "Should be unreachable!");
    break;

  case CK_ArrayToPointerDecay: {
    assert(E->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");

    Value *V = EmitLValue(E).getAddress();  // Bitfields can't be arrays.

    // Note that VLA pointers are always decayed, so we don't need to do
    // anything here.
    if (!E->getType()->isVariableArrayType()) {
      assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
      assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
                                 ->getElementType()) &&
             "Expected pointer to array");
      V = Builder.CreateStructGEP(V, 0, "arraydecay");
    }

    return V;
  }
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getAddress();

  case CK_NullToPointer:
    // A nullptr_t operand may have side effects and must still be evaluated.
    if (MustVisitNullValue(E))
      (void) Visit(E);

    return llvm::ConstantPointerNull::get(
                               cast<llvm::PointerType>(ConvertType(DestTy)));

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      (void) Visit(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here.  This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
    assert(0 && "Should be unreachable!");
    break;

  case CK_IntegralToPointer: {
    Value *Src = Visit(const_cast<Expr*>(E));

    // First, convert to the correct width so that we control the kind of
    // extension.
    const llvm::Type *MiddleTy = CGF.IntPtrTy;
    bool InputSigned = E->getType()->isSignedIntegerType();
    llvm::Value* IntResult =
      Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
  }
  case CK_PointerToIntegral: {
    Value *Src = Visit(const_cast<Expr*>(E));

    // Handle conversion to bool correctly.
    if (DestTy->isBooleanType())
      return EmitScalarConversion(Src, E->getType(), DestTy);

    return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    // Evaluate the operand for its side effects only; the cast yields no
    // scalar value.
    if (E->Classify(CGF.getContext()).isGLValue()) {
      LValue LV = CGF.EmitLValue(E);
      if (LV.isPropertyRef())
        CGF.EmitLoadOfPropertyRefLValue(LV, E->getType());
      else if (LV.isKVCRef())
        CGF.EmitLoadOfKVCRefLValue(LV, E->getType());
    }
    else
      CGF.EmitAnyExpr(E, AggValueSlot::ignored(), true);
    return 0;
  }
  case CK_VectorSplat: {
    const llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr*>(E));

    // Insert the element in element zero of an undef vector
    llvm::Value *UnV = llvm::UndefValue::get(DstTy);
    llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
    UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");

    // Splat the element across to all elements with an all-zeros mask.
    llvm::SmallVector<llvm::Constant*, 16> Args;
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    for (unsigned i = 0; i < NumElements; i++)
      Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));

    llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
    llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
    return Yay;
  }
  case CK_IntegralCast:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy);

  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    return CGF.EmitComplexExpr(E, false, true, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    CodeGenFunction::ComplexPairTy V
      = CGF.EmitComplexExpr(E, false, false, false, false);

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
  }

  }

  // Handle cases where the source is an non-complex type.

  if (!CGF.hasAggregateLLVMType(E->getType())) {
    Value *Src = Visit(const_cast<Expr*>(E));

    // Use EmitScalarConversion to perform the conversion.
    return EmitScalarConversion(Src, E->getType(), DestTy);
  }

  // Handle cases where the source is a complex type.
  // TODO: when we're certain about cast kinds, we should just be able
  // to assert that no complexes make it here.
  if (E->getType()->isAnyComplexType()) {
    bool IgnoreImag = true;
    bool IgnoreImagAssign = true;
    bool IgnoreReal = IgnoreResultAssign;
    bool IgnoreRealAssign = IgnoreResultAssign;
    if (DestTy->isBooleanType())
      IgnoreImagAssign = IgnoreImag = false;
    else if (DestTy->isVoidType()) {
      IgnoreReal = IgnoreImag = false;
      IgnoreRealAssign = IgnoreImagAssign = true;
    }
    CodeGenFunction::ComplexPairTy V
      = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
                            IgnoreImagAssign);
    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
  }

  // Okay, this is a cast from an aggregate.  It must be a cast to void.  Just
  // evaluate the result and return.
  CGF.EmitAggExpr(E, AggValueSlot::ignored(), true);
  return 0;
}

Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  // A GNU statement expression yields the value of its last sub-statement,
  // unless the whole expression has void type.
  return CGF.EmitCompoundStmt(*E->getSubStmt(),
                              !E->getType()->isVoidType()).getScalarVal();
}

Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
  llvm::Value *V = CGF.GetAddrOfBlockDecl(E);
  if (E->getType().isObjCGCWeak())
    return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, V);
  return CGF.EmitLoadOfScalar(V, false, 0, E->getType());
}

//===----------------------------------------------------------------------===//
//                             Unary Operators
//===----------------------------------------------------------------------===//

/// EmitScalarPrePostIncDec - Emit ++/-- (pre or post, increment or decrement)
/// of the scalar lvalue LV, storing the updated value back through LV and
/// returning the expression's value: the old value for post-ops, the new
/// value for pre-ops.
llvm::Value *ScalarExprEmitter::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {

  QualType ValTy = E->getSubExpr()->getType();
  llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy);

  int AmountVal = isInc ? 1 : -1;

  if (ValTy->isPointerType() &&
      ValTy->getAs<PointerType>()->isVariableArrayType()) {
    // The amount of the addition/subtraction needs to account for the VLA size
    CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
  }

  llvm::Value *NextVal;
  if (const llvm::PointerType *PT =
      dyn_cast<llvm::PointerType>(InVal->getType())) {
    llvm::Constant *Inc = llvm::ConstantInt::get(CGF.Int32Ty, AmountVal);
    if (!isa<llvm::FunctionType>(PT->getElementType())) {
      QualType PTEE = ValTy->getPointeeType();
      if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
        // Handle interface types, which are not represented with a concrete
        // type: advance by the interface size in bytes over an i8*.
        int size = CGF.getContext().getTypeSize(OIT) / 8;
        if (!isInc)
          size = -size;
        Inc = llvm::ConstantInt::get(Inc->getType(), size);
        const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
        InVal = Builder.CreateBitCast(InVal, i8Ty);
        NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
        llvm::Value *lhs = LV.getAddress();
        lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
        LV = CGF.MakeAddrLValue(lhs, ValTy);
      } else
        NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
    } else {
      // Function pointer arithmetic (GNU extension): step by bytes via i8*.
      const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
    }
  } else if (InVal->getType()->isIntegerTy(1) && isInc) {
    // Bool++ is an interesting case, due to promotion rules, we get:
    // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
    // Bool = ((int)Bool+1) != 0
    // An interesting aspect of this is that increment is always true.
    // Decrement does not have this property.
    NextVal = llvm::ConstantInt::getTrue(VMContext);
  } else if (isa<llvm::IntegerType>(InVal->getType())) {
    NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);

    if (!ValTy->isSignedIntegerType())
      // Unsigned integer inc is always two's complement.
      NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
    else {
      // Signed overflow behavior is selectable: UB (nsw), wrapping, or trap.
      switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Undefined:
        NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
        break;
      case LangOptions::SOB_Defined:
        NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
        break;
      case LangOptions::SOB_Trapping:
        BinOpInfo BinOp;
        BinOp.LHS = InVal;
        BinOp.RHS = NextVal;
        BinOp.Ty = E->getType();
        BinOp.Opcode = BO_Add;
        BinOp.E = E;
        NextVal = EmitOverflowCheckedBinOp(BinOp);
        break;
      }
    }
  } else {
    // Add the inc/dec to the real part.
    if (InVal->getType()->isFloatTy())
      NextVal =
        llvm::ConstantFP::get(VMContext,
                              llvm::APFloat(static_cast<float>(AmountVal)));
    else if (InVal->getType()->isDoubleTy())
      NextVal =
        llvm::ConstantFP::get(VMContext,
                              llvm::APFloat(static_cast<double>(AmountVal)));
    else {
      // Remaining FP types (e.g. long double): build +/-1 in the target's
      // long double format.
      llvm::APFloat F(static_cast<float>(AmountVal));
      bool ignored;
      F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
                &ignored);
      NextVal = llvm::ConstantFP::get(VMContext, F);
    }
    NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
  else
    CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? NextVal : InVal;
}



Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  // Emit unary minus with EmitSub so we handle overflow cases etc.
  BinOpInfo BinOp;
  BinOp.RHS = Visit(E->getSubExpr());

  // Negate as (0 - RHS); use the proper negative zero for FP operands.
  if (BinOp.RHS->getType()->isFPOrFPVectorTy())
    BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
  else
    BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
  BinOp.Ty = E->getType();
  BinOp.Opcode = BO_Sub;
  BinOp.E = E;
  return EmitSub(BinOp);
}

Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
  // Bitwise complement (~).
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNot(Op, "neg");
}

Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here.  For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}

/// VisitOffsetOfExpr - Emit __builtin_offsetof: constant-fold when possible,
/// otherwise accumulate the offset component by component (array index,
/// field, non-virtual base).
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EvalResult;
  if (E->Evaluate(EvalResult, CGF.getContext()))
    return llvm::ConstantInt::get(VMContext, EvalResult.Val.getInt());

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  const llvm::Type* ResultType = ConvertType(E->getType());
  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = 0;
    switch (ON.getKind()) {
    case OffsetOfExpr::OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfExpr::OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; (void)++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field (record layout stores bit offsets).
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfExpr::OffsetOfNode::Identifier:
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfExpr::OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      int64_t OffsetInt = RL.getBaseClassOffsetInBits(BaseRD) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}

/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (E->isSizeOf()) {
    if (const VariableArrayType *VAT =
          CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVLASize(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
        CGF.EmitAnyExpr(E->getArgumentExpr());
      }

      return CGF.GetVLASize(VAT);
    }
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  Expr::EvalResult Result;
  E->Evaluate(Result, CGF.getContext());
  return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
}

Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType())
    return CGF.EmitComplexExpr(Op, false, true, false, true).first;
  // __real on a scalar is the scalar itself.
  return Visit(Op);
}
Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType())
    return CGF.EmitComplexExpr(Op, true, false, true, false).second;

  // __imag on a scalar returns zero.  Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
    CGF.EmitLValue(Op);
  else
    CGF.EmitScalarExpr(Op, true);
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}

//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//

/// EmitBinOps - Evaluate both operands of a binary operator and package them,
/// along with the computation type and opcode, for the Emit* helpers.
BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = Visit(E->getLHS());
  Result.RHS = Visit(E->getRHS());
  Result.Ty  = E->getType();
  Result.Opcode = E->getOpcode();
  Result.E = E;
  return Result;
}

/// EmitCompoundAssignLValue - Emit a compound assignment (e.g. +=) using
/// Func for the arithmetic, storing the converted result back through the
/// LHS lvalue; returns that lvalue and the stored value via Result.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
                                              const CompoundAssignOperator *E,
                        Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
                                                   Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType()) {
    // This needs to go through the complex expression emitter, but it's a tad
    // complicated to do that... I'm leaving it out for now.  (Note that we do
    // actually need the imaginary part of the RHS for multiplication and
    // division.)
    CGF.ErrorUnsupported(E, "complex compound assignment");
    Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
    return LValue();
  }

  // Emit the RHS first.  __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.Opcode = E->getOpcode();
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS());
  OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
  OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                    E->getComputationLHSType());

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type.
  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
                                       &Result);
  else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);

  return LHSLV;
}

Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  Value *RHS;
  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);

  // If the result is clearly ignored, return now.
  if (Ignore)
    return 0;

  // Objective-C property assignment never reloads the value following a store.
  if (LHS.isPropertyRef() || LHS.isKVCRef())
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getType());
}

/// EmitUndefinedBehaviorIntegerDivAndRemCheck - For -ftrapv-style behavior,
/// branch to a trapping block when RHS == 0, or (for signed types) when
/// LHS == INT_MIN && RHS == -1, before emitting the div/rem itself.
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
                                            const BinOpInfo &Ops,
                                            llvm::Value *Zero, bool isDiv) {
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
  llvm::BasicBlock *contBB =
    CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn);

  const llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

  if (Ops.Ty->hasSignedIntegerRepresentation()) {
    llvm::Value *IntMin =
      llvm::ConstantInt::get(VMContext,
                             llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);

    llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
    llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
    llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
    Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
                         overflowBB, contBB);
  } else {
    // Unsigned: only division by zero can trap.
    CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
                             overflowBB, contBB);
  }
  EmitOverflowBB(overflowBB);
  Builder.SetInsertPoint(contBB);
}

Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  if (isTrapvOverflowBehavior()) {
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));

    if (Ops.Ty->isIntegerType())
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    else if (Ops.Ty->isRealFloatingType()) {
      llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
                                                          CGF.CurFn);
      llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn);
      // Floating-point: trap on division by (positive) zero.
      CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
                               overflowBB, DivCont);
      EmitOverflowBB(overflowBB);
      Builder.SetInsertPoint(DivCont);
    }
  }
  if (Ops.LHS->getType()->isFPOrFPVectorTy())
    return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if (isTrapvOverflowBehavior()) {
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));

    if (Ops.Ty->isIntegerType())
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->isUnsignedIntegerType())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

/// EmitOverflowCheckedBinOp - Emit a signed add/sub/mul through the
/// llvm.s{add,sub,mul}.with.overflow intrinsics.  On overflow, either trap
/// (no handler configured) or call the user's overflow handler and use its
/// returned value.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;

  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = llvm::Intrinsic::sadd_with_overflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = llvm::Intrinsic::ssub_with_overflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = llvm::Intrinsic::smul_with_overflow;
    break;
  default:
    assert(false && "Unsupported operation for overflow detection");
    IID = 0;
  }
  // Encode the operation code passed to the handler as (op << 1) | 1.
  OpID <<= 1;
  OpID |= 1;

  const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);

  // The intrinsic returns {result, did-overflow}.
  Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
  llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // Handle overflow with llvm.trap.
  const std::string *handlerName =
    &CGF.getContext().getLangOptions().OverflowHandler;
  if (handlerName->empty()) {
    EmitOverflowBB(overflowBB);
    Builder.SetInsertPoint(continueBB);
    return result;
  }

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler: i64 (i64, i64, i8, i8, ...) varargs.
  const llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
  std::vector<const llvm::Type*> argTypes;
  argTypes.push_back(CGF.Int64Ty); argTypes.push_back(CGF.Int64Ty);
  argTypes.push_back(Int8Ty); argTypes.push_back(Int8Ty);
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerResult = Builder.CreateCall4(handler, lhs, rhs,
      Builder.getInt8(OpID),
      Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()));

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  // Merge the normal result with the handler's result.
  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy);
  phi->reserveOperandSpace(2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}

Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
  if (!Ops.Ty->isAnyPointerType()) {
    if (Ops.Ty->hasSignedIntegerRepresentation()) {
      switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Undefined:
        return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
      case LangOptions::SOB_Defined:
        return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
      case LangOptions::SOB_Trapping:
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.LHS->getType()->isFPOrFPVectorTy())
      return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");

    return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
  }

  // Must have binary (not unary) expr here.  Unary pointer decrement doesn't
  // use this path.
1764 const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E); 1765 1766 if (Ops.Ty->isPointerType() && 1767 Ops.Ty->getAs<PointerType>()->isVariableArrayType()) { 1768 // The amount of the addition needs to account for the VLA size 1769 CGF.ErrorUnsupported(BinOp, "VLA pointer addition"); 1770 } 1771 1772 Value *Ptr, *Idx; 1773 Expr *IdxExp; 1774 const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>(); 1775 const ObjCObjectPointerType *OPT = 1776 BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>(); 1777 if (PT || OPT) { 1778 Ptr = Ops.LHS; 1779 Idx = Ops.RHS; 1780 IdxExp = BinOp->getRHS(); 1781 } else { // int + pointer 1782 PT = BinOp->getRHS()->getType()->getAs<PointerType>(); 1783 OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>(); 1784 assert((PT || OPT) && "Invalid add expr"); 1785 Ptr = Ops.RHS; 1786 Idx = Ops.LHS; 1787 IdxExp = BinOp->getLHS(); 1788 } 1789 1790 unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); 1791 if (Width < CGF.LLVMPointerWidth) { 1792 // Zero or sign extend the pointer value based on whether the index is 1793 // signed or not. 1794 const llvm::Type *IdxType = CGF.IntPtrTy; 1795 if (IdxExp->getType()->isSignedIntegerType()) 1796 Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); 1797 else 1798 Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext"); 1799 } 1800 const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType(); 1801 // Handle interface types, which are not represented with a concrete type. 
1802 if (const ObjCObjectType *OIT = ElementType->getAs<ObjCObjectType>()) { 1803 llvm::Value *InterfaceSize = 1804 llvm::ConstantInt::get(Idx->getType(), 1805 CGF.getContext().getTypeSizeInChars(OIT).getQuantity()); 1806 Idx = Builder.CreateMul(Idx, InterfaceSize); 1807 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); 1808 Value *Casted = Builder.CreateBitCast(Ptr, i8Ty); 1809 Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr"); 1810 return Builder.CreateBitCast(Res, Ptr->getType()); 1811 } 1812 1813 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 1814 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 1815 // future proof. 1816 if (ElementType->isVoidType() || ElementType->isFunctionType()) { 1817 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); 1818 Value *Casted = Builder.CreateBitCast(Ptr, i8Ty); 1819 Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr"); 1820 return Builder.CreateBitCast(Res, Ptr->getType()); 1821 } 1822 1823 return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr"); 1824} 1825 1826Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { 1827 if (!isa<llvm::PointerType>(Ops.LHS->getType())) { 1828 if (Ops.Ty->hasSignedIntegerRepresentation()) { 1829 switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) { 1830 case LangOptions::SOB_Undefined: 1831 return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub"); 1832 case LangOptions::SOB_Defined: 1833 return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub"); 1834 case LangOptions::SOB_Trapping: 1835 return EmitOverflowCheckedBinOp(Ops); 1836 } 1837 } 1838 1839 if (Ops.LHS->getType()->isFPOrFPVectorTy()) 1840 return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub"); 1841 1842 return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub"); 1843 } 1844 1845 // Must have binary (not unary) expr here. Unary pointer increment doesn't 1846 // use this path. 
1847 const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E); 1848 1849 if (BinOp->getLHS()->getType()->isPointerType() && 1850 BinOp->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) { 1851 // The amount of the addition needs to account for the VLA size for 1852 // ptr-int 1853 // The amount of the division needs to account for the VLA size for 1854 // ptr-ptr. 1855 CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction"); 1856 } 1857 1858 const QualType LHSType = BinOp->getLHS()->getType(); 1859 const QualType LHSElementType = LHSType->getPointeeType(); 1860 if (!isa<llvm::PointerType>(Ops.RHS->getType())) { 1861 // pointer - int 1862 Value *Idx = Ops.RHS; 1863 unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); 1864 if (Width < CGF.LLVMPointerWidth) { 1865 // Zero or sign extend the pointer value based on whether the index is 1866 // signed or not. 1867 const llvm::Type *IdxType = CGF.IntPtrTy; 1868 if (BinOp->getRHS()->getType()->isSignedIntegerType()) 1869 Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); 1870 else 1871 Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext"); 1872 } 1873 Idx = Builder.CreateNeg(Idx, "sub.ptr.neg"); 1874 1875 // Handle interface types, which are not represented with a concrete type. 1876 if (const ObjCObjectType *OIT = LHSElementType->getAs<ObjCObjectType>()) { 1877 llvm::Value *InterfaceSize = 1878 llvm::ConstantInt::get(Idx->getType(), 1879 CGF.getContext(). 1880 getTypeSizeInChars(OIT).getQuantity()); 1881 Idx = Builder.CreateMul(Idx, InterfaceSize); 1882 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); 1883 Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty); 1884 Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr"); 1885 return Builder.CreateBitCast(Res, Ops.LHS->getType()); 1886 } 1887 1888 // Explicitly handle GNU void* and function pointer arithmetic 1889 // extensions. 
The GNU void* casts amount to no-ops since our void* type is 1890 // i8*, but this is future proof. 1891 if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) { 1892 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); 1893 Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty); 1894 Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr"); 1895 return Builder.CreateBitCast(Res, Ops.LHS->getType()); 1896 } 1897 1898 return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr"); 1899 } else { 1900 // pointer - pointer 1901 Value *LHS = Ops.LHS; 1902 Value *RHS = Ops.RHS; 1903 1904 CharUnits ElementSize; 1905 1906 // Handle GCC extension for pointer arithmetic on void* and function pointer 1907 // types. 1908 if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) { 1909 ElementSize = CharUnits::One(); 1910 } else { 1911 ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType); 1912 } 1913 1914 const llvm::Type *ResultType = ConvertType(Ops.Ty); 1915 LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast"); 1916 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); 1917 Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); 1918 1919 // Optimize out the shift for element size of 1. 1920 if (ElementSize.isOne()) 1921 return BytesBetween; 1922 1923 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since 1924 // pointer difference in C is only defined in the case where both operands 1925 // are pointing to elements of an array. 1926 Value *BytesPerElt = 1927 llvm::ConstantInt::get(ResultType, ElementSize.getQuantity()); 1928 return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div"); 1929 } 1930} 1931 1932Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { 1933 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 1934 // RHS to the same size as the LHS. 
1935 Value *RHS = Ops.RHS; 1936 if (Ops.LHS->getType() != RHS->getType()) 1937 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 1938 1939 if (CGF.CatchUndefined 1940 && isa<llvm::IntegerType>(Ops.LHS->getType())) { 1941 unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth(); 1942 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 1943 CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS, 1944 llvm::ConstantInt::get(RHS->getType(), Width)), 1945 Cont, CGF.getTrapBB()); 1946 CGF.EmitBlock(Cont); 1947 } 1948 1949 return Builder.CreateShl(Ops.LHS, RHS, "shl"); 1950} 1951 1952Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { 1953 // LLVM requires the LHS and RHS to be the same type: promote or truncate the 1954 // RHS to the same size as the LHS. 1955 Value *RHS = Ops.RHS; 1956 if (Ops.LHS->getType() != RHS->getType()) 1957 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); 1958 1959 if (CGF.CatchUndefined 1960 && isa<llvm::IntegerType>(Ops.LHS->getType())) { 1961 unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth(); 1962 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 1963 CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS, 1964 llvm::ConstantInt::get(RHS->getType(), Width)), 1965 Cont, CGF.getTrapBB()); 1966 CGF.EmitBlock(Cont); 1967 } 1968 1969 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 1970 return Builder.CreateLShr(Ops.LHS, RHS, "shr"); 1971 return Builder.CreateAShr(Ops.LHS, RHS, "shr"); 1972} 1973 1974Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc, 1975 unsigned SICmpOpc, unsigned FCmpOpc) { 1976 TestAndClearIgnoreResultAssign(); 1977 Value *Result; 1978 QualType LHSTy = E->getLHS()->getType(); 1979 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) { 1980 assert(E->getOpcode() == BO_EQ || 1981 E->getOpcode() == BO_NE); 1982 Value *LHS = CGF.EmitScalarExpr(E->getLHS()); 1983 Value *RHS = 
CGF.EmitScalarExpr(E->getRHS()); 1984 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison( 1985 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE); 1986 } else if (!LHSTy->isAnyComplexType()) { 1987 Value *LHS = Visit(E->getLHS()); 1988 Value *RHS = Visit(E->getRHS()); 1989 1990 if (LHS->getType()->isFPOrFPVectorTy()) { 1991 Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc, 1992 LHS, RHS, "cmp"); 1993 } else if (LHSTy->hasSignedIntegerRepresentation()) { 1994 Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc, 1995 LHS, RHS, "cmp"); 1996 } else { 1997 // Unsigned integers and pointers. 1998 Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, 1999 LHS, RHS, "cmp"); 2000 } 2001 2002 // If this is a vector comparison, sign extend the result to the appropriate 2003 // vector integer type and return it (don't convert to bool). 2004 if (LHSTy->isVectorType()) 2005 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 2006 2007 } else { 2008 // Complex Comparison: can only be an equality comparison. 2009 CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS()); 2010 CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS()); 2011 2012 QualType CETy = LHSTy->getAs<ComplexType>()->getElementType(); 2013 2014 Value *ResultR, *ResultI; 2015 if (CETy->isRealFloatingType()) { 2016 ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc, 2017 LHS.first, RHS.first, "cmp.r"); 2018 ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc, 2019 LHS.second, RHS.second, "cmp.i"); 2020 } else { 2021 // Complex comparisons can only be equality comparisons. As such, signed 2022 // and unsigned opcodes are the same. 
2023 ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, 2024 LHS.first, RHS.first, "cmp.r"); 2025 ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, 2026 LHS.second, RHS.second, "cmp.i"); 2027 } 2028 2029 if (E->getOpcode() == BO_EQ) { 2030 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); 2031 } else { 2032 assert(E->getOpcode() == BO_NE && 2033 "Complex comparison other than == or != ?"); 2034 Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); 2035 } 2036 } 2037 2038 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType()); 2039} 2040 2041Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { 2042 bool Ignore = TestAndClearIgnoreResultAssign(); 2043 2044 // __block variables need to have the rhs evaluated first, plus this should 2045 // improve codegen just a little. 2046 Value *RHS = Visit(E->getRHS()); 2047 LValue LHS = EmitCheckedLValue(E->getLHS()); 2048 2049 // Store the value into the LHS. Bit-fields are handled specially 2050 // because the result is altered by the store, i.e., [C99 6.5.16p1] 2051 // 'An assignment expression has the value of the left operand after 2052 // the assignment...'. 2053 if (LHS.isBitField()) 2054 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(), 2055 &RHS); 2056 else 2057 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType()); 2058 2059 // If the result is clearly ignored, return now. 2060 if (Ignore) 2061 return 0; 2062 2063 // Objective-C property assignment never reloads the value following a store. 2064 if (LHS.isPropertyRef() || LHS.isKVCRef()) 2065 return RHS; 2066 2067 // If the lvalue is non-volatile, return the computed value of the assignment. 2068 if (!LHS.isVolatileQualified()) 2069 return RHS; 2070 2071 // Otherwise, reload the value. 
2072 return EmitLoadOfLValue(LHS, E->getType()); 2073} 2074 2075Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { 2076 const llvm::Type *ResTy = ConvertType(E->getType()); 2077 2078 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0. 2079 // If we have 1 && X, just emit X without inserting the control flow. 2080 if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) { 2081 if (Cond == 1) { // If we have 1 && X, just emit X. 2082 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 2083 // ZExt result to int or bool. 2084 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext"); 2085 } 2086 2087 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false. 2088 if (!CGF.ContainsLabel(E->getRHS())) 2089 return llvm::Constant::getNullValue(ResTy); 2090 } 2091 2092 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end"); 2093 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs"); 2094 2095 // Branch on the LHS first. If it is false, go to the failure (cont) block. 2096 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock); 2097 2098 // Any edges into the ContBlock are now from an (indeterminate number of) 2099 // edges from this first condition. All of these values will be false. Start 2100 // setting up the PHI node in the Cont Block for this. 2101 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2102 "", ContBlock); 2103 PN->reserveOperandSpace(2); // Normal case, two inputs. 2104 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); 2105 PI != PE; ++PI) 2106 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI); 2107 2108 CGF.BeginConditionalBranch(); 2109 CGF.EmitBlock(RHSBlock); 2110 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 2111 CGF.EndConditionalBranch(); 2112 2113 // Reaquire the RHS block, as there may be subblocks inserted. 
2114 RHSBlock = Builder.GetInsertBlock(); 2115 2116 // Emit an unconditional branch from this block to ContBlock. Insert an entry 2117 // into the phi node for the edge with the value of RHSCond. 2118 CGF.EmitBlock(ContBlock); 2119 PN->addIncoming(RHSCond, RHSBlock); 2120 2121 // ZExt result to int. 2122 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext"); 2123} 2124 2125Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { 2126 const llvm::Type *ResTy = ConvertType(E->getType()); 2127 2128 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1. 2129 // If we have 0 || X, just emit X without inserting the control flow. 2130 if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) { 2131 if (Cond == -1) { // If we have 0 || X, just emit X. 2132 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 2133 // ZExt result to int or bool. 2134 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext"); 2135 } 2136 2137 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true. 2138 if (!CGF.ContainsLabel(E->getRHS())) 2139 return llvm::ConstantInt::get(ResTy, 1); 2140 } 2141 2142 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end"); 2143 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs"); 2144 2145 // Branch on the LHS first. If it is true, go to the success (cont) block. 2146 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock); 2147 2148 // Any edges into the ContBlock are now from an (indeterminate number of) 2149 // edges from this first condition. All of these values will be true. Start 2150 // setting up the PHI node in the Cont Block for this. 2151 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2152 "", ContBlock); 2153 PN->reserveOperandSpace(2); // Normal case, two inputs. 
2154 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); 2155 PI != PE; ++PI) 2156 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI); 2157 2158 CGF.BeginConditionalBranch(); 2159 2160 // Emit the RHS condition as a bool value. 2161 CGF.EmitBlock(RHSBlock); 2162 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); 2163 2164 CGF.EndConditionalBranch(); 2165 2166 // Reaquire the RHS block, as there may be subblocks inserted. 2167 RHSBlock = Builder.GetInsertBlock(); 2168 2169 // Emit an unconditional branch from this block to ContBlock. Insert an entry 2170 // into the phi node for the edge with the value of RHSCond. 2171 CGF.EmitBlock(ContBlock); 2172 PN->addIncoming(RHSCond, RHSBlock); 2173 2174 // ZExt result to int. 2175 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext"); 2176} 2177 2178Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) { 2179 CGF.EmitStmt(E->getLHS()); 2180 CGF.EnsureInsertPoint(); 2181 return Visit(E->getRHS()); 2182} 2183 2184//===----------------------------------------------------------------------===// 2185// Other Operators 2186//===----------------------------------------------------------------------===// 2187 2188/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified 2189/// expression is cheap enough and side-effect-free enough to evaluate 2190/// unconditionally instead of conditionally. This is used to convert control 2191/// flow into selects in some cases. 2192static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, 2193 CodeGenFunction &CGF) { 2194 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 2195 return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr(), CGF); 2196 2197 // TODO: Allow anything we can constant fold to an integer or fp constant. 2198 if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) || 2199 isa<FloatingLiteral>(E)) 2200 return true; 2201 2202 // Non-volatile automatic variables too, to get "cond ? 
X : Y" where 2203 // X and Y are local variables. 2204 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 2205 if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) 2206 if (VD->hasLocalStorage() && !(CGF.getContext() 2207 .getCanonicalType(VD->getType()) 2208 .isVolatileQualified())) 2209 return true; 2210 2211 return false; 2212} 2213 2214 2215Value *ScalarExprEmitter:: 2216VisitConditionalOperator(const ConditionalOperator *E) { 2217 TestAndClearIgnoreResultAssign(); 2218 // If the condition constant folds and can be elided, try to avoid emitting 2219 // the condition and the dead arm. 2220 if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){ 2221 Expr *Live = E->getLHS(), *Dead = E->getRHS(); 2222 if (Cond == -1) 2223 std::swap(Live, Dead); 2224 2225 // If the dead side doesn't have labels we need, and if the Live side isn't 2226 // the gnu missing ?: extension (which we could handle, but don't bother 2227 // to), just emit the Live part. 2228 if ((!Dead || !CGF.ContainsLabel(Dead)) && // No labels in dead part 2229 Live) // Live part isn't missing. 2230 return Visit(Live); 2231 } 2232 2233 // OpenCL: If the condition is a vector, we can treat this condition like 2234 // the select function. 
2235 if (CGF.getContext().getLangOptions().OpenCL 2236 && E->getCond()->getType()->isVectorType()) { 2237 llvm::Value *CondV = CGF.EmitScalarExpr(E->getCond()); 2238 llvm::Value *LHS = Visit(E->getLHS()); 2239 llvm::Value *RHS = Visit(E->getRHS()); 2240 2241 const llvm::Type *condType = ConvertType(E->getCond()->getType()); 2242 const llvm::VectorType *vecTy = cast<llvm::VectorType>(condType); 2243 2244 unsigned numElem = vecTy->getNumElements(); 2245 const llvm::Type *elemType = vecTy->getElementType(); 2246 2247 std::vector<llvm::Constant*> Zvals; 2248 for (unsigned i = 0; i < numElem; ++i) 2249 Zvals.push_back(llvm::ConstantInt::get(elemType,0)); 2250 2251 llvm::Value *zeroVec = llvm::ConstantVector::get(Zvals); 2252 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec); 2253 llvm::Value *tmp = Builder.CreateSExt(TestMSB, 2254 llvm::VectorType::get(elemType, 2255 numElem), 2256 "sext"); 2257 llvm::Value *tmp2 = Builder.CreateNot(tmp); 2258 2259 // Cast float to int to perform ANDs if necessary. 2260 llvm::Value *RHSTmp = RHS; 2261 llvm::Value *LHSTmp = LHS; 2262 bool wasCast = false; 2263 const llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType()); 2264 if (rhsVTy->getElementType()->isFloatTy()) { 2265 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType()); 2266 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType()); 2267 wasCast = true; 2268 } 2269 2270 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2); 2271 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp); 2272 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond"); 2273 if (wasCast) 2274 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType()); 2275 2276 return tmp5; 2277 } 2278 2279 // If this is a really simple expression (like x ? 4 : 5), emit this as a 2280 // select instead of as control flow. We can only do this if it is cheap and 2281 // safe to evaluate the LHS and RHS unconditionally. 
2282 if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS(), 2283 CGF) && 2284 isCheapEnoughToEvaluateUnconditionally(E->getRHS(), CGF)) { 2285 llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond()); 2286 llvm::Value *LHS = Visit(E->getLHS()); 2287 llvm::Value *RHS = Visit(E->getRHS()); 2288 return Builder.CreateSelect(CondV, LHS, RHS, "cond"); 2289 } 2290 2291 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true"); 2292 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false"); 2293 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); 2294 2295 // If we don't have the GNU missing condition extension, emit a branch on bool 2296 // the normal way. 2297 if (E->getLHS()) { 2298 // Otherwise, just use EmitBranchOnBoolExpr to get small and simple code for 2299 // the branch on bool. 2300 CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock); 2301 } else { 2302 // Otherwise, for the ?: extension, evaluate the conditional and then 2303 // convert it to bool the hard way. We do this explicitly because we need 2304 // the unconverted value for the missing middle value of the ?:. 2305 Expr *save = E->getSAVE(); 2306 assert(save && "VisitConditionalOperator - save is null"); 2307 // Intentianlly not doing direct assignment to ConditionalSaveExprs[save] !! 2308 Value *SaveVal = CGF.EmitScalarExpr(save); 2309 CGF.ConditionalSaveExprs[save] = SaveVal; 2310 Value *CondVal = Visit(E->getCond()); 2311 // In some cases, EmitScalarConversion will delete the "CondVal" expression 2312 // if there are no extra uses (an optimization). Inhibit this by making an 2313 // extra dead use, because we're going to add a use of CondVal later. We 2314 // don't use the builder for this, because we don't want it to get optimized 2315 // away. This leaves dead code, but the ?: extension isn't common. 
2316 new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder", 2317 Builder.GetInsertBlock()); 2318 2319 Value *CondBoolVal = 2320 CGF.EmitScalarConversion(CondVal, E->getCond()->getType(), 2321 CGF.getContext().BoolTy); 2322 Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock); 2323 } 2324 2325 CGF.BeginConditionalBranch(); 2326 CGF.EmitBlock(LHSBlock); 2327 2328 // Handle the GNU extension for missing LHS. 2329 Value *LHS = Visit(E->getTrueExpr()); 2330 2331 CGF.EndConditionalBranch(); 2332 LHSBlock = Builder.GetInsertBlock(); 2333 CGF.EmitBranch(ContBlock); 2334 2335 CGF.BeginConditionalBranch(); 2336 CGF.EmitBlock(RHSBlock); 2337 2338 Value *RHS = Visit(E->getRHS()); 2339 CGF.EndConditionalBranch(); 2340 RHSBlock = Builder.GetInsertBlock(); 2341 CGF.EmitBranch(ContBlock); 2342 2343 CGF.EmitBlock(ContBlock); 2344 2345 // If the LHS or RHS is a throw expression, it will be legitimately null. 2346 if (!LHS) 2347 return RHS; 2348 if (!RHS) 2349 return LHS; 2350 2351 // Create a PHI node for the real part. 2352 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond"); 2353 PN->reserveOperandSpace(2); 2354 PN->addIncoming(LHS, LHSBlock); 2355 PN->addIncoming(RHS, RHSBlock); 2356 return PN; 2357} 2358 2359Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) { 2360 return Visit(E->getChosenSubExpr(CGF.getContext())); 2361} 2362 2363Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { 2364 llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr()); 2365 llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType()); 2366 2367 // If EmitVAArg fails, we fall back to the LLVM instruction. 2368 if (!ArgPtr) 2369 return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType())); 2370 2371 // FIXME Volatility. 
2372 return Builder.CreateLoad(ArgPtr); 2373} 2374 2375Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) { 2376 return CGF.BuildBlockLiteralTmp(BE); 2377} 2378 2379//===----------------------------------------------------------------------===// 2380// Entry Point into this File 2381//===----------------------------------------------------------------------===// 2382 2383/// EmitScalarExpr - Emit the computation of the specified expression of scalar 2384/// type, ignoring the result. 2385Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { 2386 assert(E && !hasAggregateLLVMType(E->getType()) && 2387 "Invalid scalar expression to emit"); 2388 2389 return ScalarExprEmitter(*this, IgnoreResultAssign) 2390 .Visit(const_cast<Expr*>(E)); 2391} 2392 2393/// EmitScalarConversion - Emit a conversion from the specified type to the 2394/// specified destination type, both of which are LLVM scalar types. 2395Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, 2396 QualType DstTy) { 2397 assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) && 2398 "Invalid scalar expression to emit"); 2399 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy); 2400} 2401 2402/// EmitComplexToScalarConversion - Emit a conversion from the specified complex 2403/// type to the specified destination type, where the destination type is an 2404/// LLVM scalar type. 
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy) {
  assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
                                                                DstTy);
}


/// EmitScalarPrePostIncDec - Emit a pre/post increment or decrement of the
/// given lvalue; forwards to the ScalarExprEmitter implementation.
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

/// EmitObjCIsaExpr - Emit an lvalue for an ObjC 'isa' access
/// (object->isa or (*object).isa), generated as *(Class*)object.
LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  llvm::Value *V;
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object
  // build Class* type
  const llvm::Type *ClassPtrTy = ConvertType(E->getType());

  Expr *BaseExpr = E->getBase();
  if (BaseExpr->isLvalue(getContext()) != Expr::LV_Valid) {
    // Non-lvalue base: materialize its value into a temporary so we have an
    // address to work with.
    V = CreateTempAlloca(ClassPtrTy, "resval");
    llvm::Value *Src = EmitScalarExpr(BaseExpr);
    Builder.CreateStore(Src, V);
    V = ScalarExprEmitter(*this).EmitLoadOfLValue(
      MakeAddrLValue(V, E->getType()), E->getType());
  } else {
    // Lvalue base: for '->' load the pointer value, for '.' take the address.
    if (E->isArrow())
      V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
    else
      V = EmitLValue(BaseExpr).getAddress();
  }

  // build Class* type
  ClassPtrTy = ClassPtrTy->getPointerTo();
  V = Builder.CreateBitCast(V, ClassPtrTy);
  return MakeAddrLValue(V, E->getType());
}


/// EmitCompoundAssignOperatorLValue - Emit a compound assignment (+=, -=, …)
/// as an lvalue, dispatching to EmitCompoundAssignLValue with the matching
/// ScalarExprEmitter::Emit<Op> member function for each opcode.
LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
                                            const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = 0;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op) \
    case BO_##Op##Assign: \
      return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
                                             Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  // Every non-compound-assignment opcode is invalid here; list them
  // explicitly so the switch stays fully covered.
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    assert(false && "Not valid compound assignment operators");
    break;
  }

  llvm_unreachable("Unhandled compound assignment operator");
}