CGExprAgg.cpp revision a07398ed98ea2b55ad7a505a3aab18aed93b149f
//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  ReturnValueSlot getReturnValueSlot() const {
    // If the destination slot requires garbage collection, we can't
    // use the real return value slot, because we have to use the GC
    // API.
    if (Dest.requiresGCollection()) return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);

  void EmitGCMove(const Expr *E, RValue Src);

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }

  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitGCMove(E, Result);
/// If GC doesn't interfere, this will cause the result to be emitted
/// directly into the return value slot.  If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOptions().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }
  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile()|Src.isVolatileQualified());
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
    break;
  }

  case CK_GetObjCProperty: {
    LValue LV = CGF.EmitLValue(E->getSubExpr());
    assert(LV.isPropertyRef());
    RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
    EmitGCMove(E, RV);
    break;
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    break;

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_AnyPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ObjCProduceObject:
  case CK_ObjCConsumeObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitGCMove(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitGCMove(E, RV);
}

void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
  llvm_unreachable("direct property access not surrounded by "
                   "lvalue-to-rvalue cast");
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When a __block variable is on the LHS, the RHS must be evaluated
        // first, as it may change the 'forwarding' field via a call to
        // Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        bool GCollection = false;
        if (CGF.getContext().getLangOptions().getGCMode())
          GCollection = TypeRequiresGCollection(E->getLHS()->getType());
        Dest = AggValueSlot::forLValue(LHS, true, GCollection);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters; otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
    QualType ArgType = RE->getSetterArgType();
    RValue Src;
    if (ArgType->isReferenceType())
      Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
    else {
      AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
      CGF.EmitAggExpr(E->getRHS(), Slot);
      Src = Slot.asRValue();
    }
    CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
  } else {
    bool GCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      GCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true,
                                                   GCollection);
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that its lifetime is externally managed.
  Dest.setLifetimeExternallyManaged(DestLifetimeManaged);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether its lifetime was externally managed.
  bool WasManaged = Dest.isLifetimeExternallyManaged();
  Dest = EnsureSlot(E->getType());
  Dest.setLifetimeExternallyManaged();

  Visit(E->getSubExpr());

  // Set up the temporary's destructor if its lifetime wasn't already
  // being managed.
  if (!WasManaged)
    CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.EmitExprWithCleanups(E, Dest);
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.
/// This can return false if uncertain, so it just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, type);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV, true, false,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, type);
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (!CGF.hasAggregateLLVMType(type)) {
    // For non-aggregates, we can store zero.
    llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
    CGF.EmitStoreThroughLValue(RValue::get(null), lv, type);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals?  Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
      new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    const llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
    ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
    ElementType = CGF.getContext().getQualifiedType(ElementType,
                                                    Dest.getQualifiers());

    bool hasNonTrivialCXXConstructor = false;
    if (CGF.getContext().getLangOptions().CPlusPlus)
      if (const RecordType *RT = CGF.getContext()
                      .getBaseElementType(ElementType)->getAs<RecordType>()) {
        const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
        hasNonTrivialCXXConstructor = !RD->hasTrivialDefaultConstructor();
      }

    for (uint64_t i = 0; i != NumArrayElements; ++i) {
      // If we're done emitting initializers and the destination is known-zeroed
      // then we're done.
      if (i == NumInitElements &&
          Dest.isZeroed() &&
          CGF.getTypes().isZeroInitializable(ElementType) &&
          !hasNonTrivialCXXConstructor)
        break;

      llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
      LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);

      if (i < NumInitElements)
        EmitInitializationToLValue(E->getInit(i), LV);
      else if (Expr *filler = E->getArrayFiller())
        EmitInitializationToLValue(filler, LV);
      else
        EmitNullInitializationToLValue(LV);

      // If the GEP didn't get used because of a dead zero init or something
      // else, clean it up for -O0 builds and general tidiness.
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(NextVal))
        if (GEP->use_empty())
          GEP->eraseFromParent();
    }
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();

  if (E->getType()->isUnionType()) {
    // Only initialize one field of a union.  The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = SD->field_begin(),
                                   FieldEnd = SD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned CurInitVal = 0;
  for (RecordDecl::field_iterator Field = SD->field_begin(),
                                FieldEnd = SD->field_end();
       Field != FieldEnd; ++Field) {
    // We're done once we hit the flexible array member.
    if (Field->getType()->isIncompleteArrayType())
      break;

    if (Field->isUnnamedBitfield())
      continue;

    // Don't emit GEP before a noop store of zero.
    if (CurInitVal == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
    // We never generate write-barriers for initialized fields.
    FieldLoc.setNonGC(true);

    if (CurInitVal < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (FieldLoc.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(FieldLoc.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().Target.getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }


  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getContext().getLangOptions().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());

  Loc = CGF.Builder.CreateBitCast(Loc, BP);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}



/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
///
/// \param IsInitializer - true if this evaluation is initializing an
/// object whose lifetime is already being managed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, false));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  const llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");

  const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  const llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       TypeInfo.second.getQuantity(), isVolatile);
}
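
For orientation: callers typically drive this file's entry point the way EmitAggExprToLValue above does, by materializing a slot for the destination and handing it to EmitAggExpr. A minimal sketch, assuming a CodeGenFunction &CGF and an aggregate-typed const Expr *E are in scope (the variable names here are illustrative, not part of the revision):

    // Evaluate an aggregate expression E into a fresh temporary slot.
    llvm::Value *Temp = CGF.CreateMemTemp(E->getType());  // backing memory for the result
    LValue LV = CGF.MakeAddrLValue(Temp, E->getType());   // view the temporary as an lvalue
    AggValueSlot Slot = AggValueSlot::forLValue(LV, false);
    CGF.EmitAggExpr(E, Slot);                              // AggExprEmitter visits E into Slot

Before the visitor runs, EmitAggExpr calls CheckAggExprForMemSetUse, which may pre-zero a large, mostly zero slot with a single memset and mark it zeroed; EmitInitializationToLValue then skips the individual stores of zero into that slot.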