CGExprAgg.cpp revision e996ffd240f20a1048179d7727a6ee3227261921
//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  ReturnValueSlot getReturnValueSlot() const {
    // If the destination slot requires garbage collection, we can't
    // use the real return value slot, because we have to use the GC
    // API.
    if (Dest.requiresGCollection()) return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);

  void EmitGCMove(const Expr *E, RValue Src);

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);

  void VisitConditionalOperator(const ConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }

  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address, QualType T);
  void EmitNullInitializationToLValue(LValue Address, QualType T);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitGCMove(E, Result);
/// If GC doesn't interfere, this will cause the result to be emitted
/// directly into the return value slot. If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
  if (Dest.requiresGCollection()) {
    std::pair<uint64_t, unsigned> TypeInfo =
      CGF.getContext().getTypeInfo(E->getType());
    unsigned long size = TypeInfo.first/8;
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOptions().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    std::pair<uint64_t, unsigned> TypeInfo =
      CGF.getContext().getTypeInfo(E->getType());
    unsigned long size = TypeInfo.first/8;
    const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }
  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile()|Src.isVolatileQualified());
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.EmitOpaqueValueLValue(e));
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (Dest.isIgnored() && E->getCastKind() != CK_Dynamic) {
    Visit(E->getSubExpr());
    return;
  }

  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(), CGF.MakeAddrLValue(CastPtr, Ty),
                               Ty);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
    break;
  }

  case CK_GetObjCProperty: {
    LValue LV = CGF.EmitLValue(E->getSubExpr());
    assert(LV.isPropertyRef());
    RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
    EmitGCMove(E, RV);
    break;
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    break;

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_AnyPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitGCMove(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitGCMove(E, RV);
}

void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
  llvm_unreachable("direct property access not surrounded by "
                   "lvalue-to-rvalue cast");
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // FIXME: __block variables need the RHS evaluated first!
  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), Slot);
    CGF.EmitStoreThroughPropertyRefLValue(Slot.asRValue(), LHS);
  } else {
    bool GCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      GCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true,
                                                   GCollection);
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}

void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
  if (!E->getLHS()) {
    CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
    return;
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
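  // Illustrative example: for 'cond ? makeA() : makeA()' with an aggregate
  // result, both branches below are emitted into this same Dest slot, and the
  // LHS emission may flip the slot's lifetime-managed flag (for instance via
  // VisitCXXBindTemporaryExpr), so the saved value is restored before the RHS
  // is emitted; see the comment preceding the restore below.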
  bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getLHS());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that its lifetime is externally managed.
  Dest.setLifetimeExternallyManaged(DestLifetimeManaged);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getRHS());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether its lifetime was externally managed.
  bool WasManaged = Dest.isLifetimeExternallyManaged();
  Dest = EnsureSlot(E->getType());
  Dest.setLifetimeExternallyManaged();

  Visit(E->getSubExpr());

  // Set up the temporary's destructor if its lifetime wasn't already
  // being managed.
  if (!WasManaged)
    CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.EmitExprWithCleanups(E, Dest);
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  // (0)
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return isSimpleZero(PE->getSubExpr(), CGF);
  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
                                             false, Dest.isZeroed()));
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(T))
    return;

  if (!CGF.hasAggregateLLVMType(T)) {
    // For non-aggregates, we can store zero.
    llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
    CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(LV.getAddress(), T);
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
      new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
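  // Illustrative example: for 'int a[4] = {1, 2};' the loop below emits
  // stores for the two explicit initializers and default (null) initializes
  // the remaining elements, unless the destination slot is already known to
  // be zeroed, in which case the trailing zero stores are skipped.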
  if (E->getType()->isArrayType()) {
    const llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
    ElementType =
      CGF.getContext().getAsArrayType(ElementType)->getElementType();

    // FIXME: were we intentionally ignoring address spaces and GC attributes?

    for (uint64_t i = 0; i != NumArrayElements; ++i) {
      // If we're done emitting initializers and the destination is
      // known-zeroed, then we're done.
      if (i == NumInitElements &&
          Dest.isZeroed() &&
          CGF.getTypes().isZeroInitializable(ElementType))
        break;

      llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
      LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);

      if (i < NumInitElements)
        EmitInitializationToLValue(E->getInit(i), LV, ElementType);
      else
        EmitNullInitializationToLValue(LV, ElementType);

      // If the GEP didn't get used because of a dead zero init or something
      // else, clean it up for -O0 builds and general tidiness.
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(NextVal))
        if (GEP->use_empty())
          GEP->eraseFromParent();
    }
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();

  if (E->getType()->isUnionType()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = SD->field_begin(),
                                      FieldEnd = SD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
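  // Illustrative example: for 'struct { int a; int :3; int b; } x = {1};'
  // the loop below stores the available initializer into 'a', skips the
  // unnamed bitfield, and null-initializes any field that has no
  // corresponding initializer, unless the destination is already known to
  // be zeroed.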
  unsigned CurInitVal = 0;
  for (RecordDecl::field_iterator Field = SD->field_begin(),
                                  FieldEnd = SD->field_end();
       Field != FieldEnd; ++Field) {
    // We're done once we hit the flexible array member.
    if (Field->getType()->isIncompleteArrayType())
      break;

    if (Field->isUnnamedBitfield())
      continue;

    // Don't emit GEP before a noop store of zero.
    if (CurInitVal == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
    // We never generate write-barriers for initialized fields.
    FieldLoc.setNonGC(true);

    if (CurInitVal < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
                                 Field->getType());
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (FieldLoc.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(FieldLoc.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static uint64_t GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return GetNumNonZeroBytesInInit(PE->getSubExpr(), CGF);

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return 0;

  // If this is an initlist expr, sum up the sizes of the (present) elements.
  // If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSize(E->getType())/8;

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      uint64_t NumNonZeroBytes = 0;

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
                                      FieldEnd = SD->field_end();
           Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a
        // pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().Target.getPointerWidth(0);
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }


  uint64_t NumNonZeroBytes = 0;
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // If the type is 16-bytes or smaller, prefer individual stores over memset.
  std::pair<uint64_t, unsigned> TypeInfo =
    CGF.getContext().getTypeInfo(E->getType());
  if (TypeInfo.first/8 <= 16)
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  uint64_t NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first/8)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first/8);
  unsigned Align = TypeInfo.second/8;

  llvm::Value *Loc = Slot.getAddr();
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());

  Loc = CGF.Builder.CreateBitCast(Loc, BP);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, Align, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}


/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
///
/// \param IsInitializer - true if this evaluation is initializing an
/// object whose lifetime is already being managed.
//
// FIXME: Take Qualifiers object.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
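  // Illustrative example: for a large, mostly-zero initializer such as
  //   struct S { int a[100]; } s = { { 1, 2 } };
  // CheckAggExprForMemSetUse may emit a single memset of the whole slot and
  // mark it as zeroed, so the per-element emission below only has to store
  // the handful of non-zero values.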
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forAddr(Temp, LV.isVolatileQualified(), false));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the
  // first object, then the overlap shall be exact and the two objects shall
  // have qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.
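
  // The copy below works on raw bytes: both pointers are cast to i8* in their
  // original address spaces, and then either an Objective-C GC collectable
  // memmove (when the record, or the element type of an array, has an object
  // member) or a plain llvm.memcpy of TypeInfo.first/8 bytes is emitted.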
  const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  const llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");

  const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  const llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");

  if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      unsigned long size = TypeInfo.first/8;
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (getContext().getAsArrayType(Ty)) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        unsigned long size = TypeInfo.first/8;
        const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
                       TypeInfo.second/8, isVolatile);
}