CodeGenFunction.h revision 57b3b6a60856eaec30fd876a8a3face8f7e3ad7b
//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the internal per-function state used for llvm translation.
//
//===----------------------------------------------------------------------===//

#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
#define CLANG_CODEGEN_CODEGENFUNCTION_H

#include "clang/AST/Type.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/CharUnits.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ValueHandle.h"
#include "CodeGenModule.h"
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"

namespace llvm {
  class BasicBlock;
  class LLVMContext;
  class MDNode;
  class Module;
  class SwitchInst;
  class Twine;
  class Value;
  class CallSite;
}

namespace clang {
  class APValue;
  class ASTContext;
  class CXXDestructorDecl;
  class CXXTryStmt;
  class Decl;
  class LabelDecl;
  class EnumConstantDecl;
  class FunctionDecl;
  class FunctionProtoType;
  class LabelStmt;
  class ObjCContainerDecl;
  class ObjCInterfaceDecl;
  class ObjCIvarDecl;
  class ObjCMethodDecl;
  class ObjCImplementationDecl;
  class ObjCPropertyImplDecl;
  class TargetInfo;
  class TargetCodeGenInfo;
  class VarDecl;
  class ObjCForCollectionStmt;
  class ObjCAtTryStmt;
  class ObjCAtThrowStmt;
  class ObjCAtSynchronizedStmt;

namespace CodeGen {
  class CodeGenTypes;
  class CGDebugInfo;
  class CGFunctionInfo;
  class CGRecordLayout;
  class CGBlockInfo;
  class CGCXXABI;
  class BlockFlags;
  class BlockFieldFlags;

/// A branch fixup. These are required when emitting a goto to a
/// label which hasn't been emitted yet. The goto is optimistically
/// emitted as a branch to the basic block for the label, and (if it
/// occurs in a scope with non-trivial cleanups) a fixup is added to
/// the innermost cleanup. When a (normal) cleanup is popped, any
/// unresolved fixups in that scope are threaded through the cleanup.
struct BranchFixup {
  /// The block containing the terminator which needs to be modified
  /// into a switch if this fixup is resolved into the current scope.
  /// If null, InitialBranch points directly to the destination.
  llvm::BasicBlock *OptimisticBranchBlock;

  /// The ultimate destination of the branch.
  ///
  /// This can be set to null to indicate that this fixup was
  /// successfully resolved.
  llvm::BasicBlock *Destination;

  /// The destination index value.
  unsigned DestinationIndex;

  /// The initial branch of the fixup.
  llvm::BranchInst *InitialBranch;
};

template <class T> struct InvariantValue {
  typedef T type;
  typedef T saved_type;
  static bool needsSaving(type value) { return false; }
  static saved_type save(CodeGenFunction &CGF, type value) { return value; }
  static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
};

/// A metaprogramming class for ensuring that a value will dominate an
/// arbitrary position in a function.
template <class T> struct DominatingValue : InvariantValue<T> {};

template <class T, bool mightBeInstruction =
            llvm::is_base_of<llvm::Value, T>::value &&
            !llvm::is_base_of<llvm::Constant, T>::value &&
            !llvm::is_base_of<llvm::BasicBlock, T>::value>
struct DominatingPointer;
template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
// template <class T> struct DominatingPointer<T,true> at end of file

template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};

enum CleanupKind {
  EHCleanup = 0x1,
  NormalCleanup = 0x2,
  NormalAndEHCleanup = EHCleanup | NormalCleanup,

  InactiveCleanup = 0x4,
  InactiveEHCleanup = EHCleanup | InactiveCleanup,
  InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
  InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
};

/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
class EHScopeStack {
public:
  /// A saved depth on the scope stack. This is necessary because
  /// pushing scopes onto the stack invalidates iterators.
  class stable_iterator {
    friend class EHScopeStack;

    /// Offset from StartOfData to EndOfBuffer.
    ptrdiff_t Size;

    stable_iterator(ptrdiff_t Size) : Size(Size) {}

  public:
    static stable_iterator invalid() { return stable_iterator(-1); }
    stable_iterator() : Size(-1) {}

    bool isValid() const { return Size >= 0; }

    /// Returns true if this scope encloses I.
    /// Returns false if I is invalid.
    /// This scope must be valid.
    bool encloses(stable_iterator I) const { return Size <= I.Size; }

    /// Returns true if this scope strictly encloses I: that is,
    /// if it encloses I and is not I.
    /// Returns false if I is invalid.
    /// This scope must be valid.
    bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }

    friend bool operator==(stable_iterator A, stable_iterator B) {
      return A.Size == B.Size;
    }
    friend bool operator!=(stable_iterator A, stable_iterator B) {
      return A.Size != B.Size;
    }
  };

  /// Information for lazily generating a cleanup. Subclasses must be
  /// POD-like: cleanups will not be destructed, and they will be
  /// allocated on the cleanup stack and freely copied and moved
  /// around.
  ///
  /// Cleanup implementations should generally be declared in an
  /// anonymous namespace.
  class Cleanup {
  public:
    // Anchor the construction vtable. We use the destructor because
    // gcc gives an obnoxious warning if there are virtual methods
    // with an accessible non-virtual destructor. Unfortunately,
    // declaring this destructor makes it non-trivial, but there
    // doesn't seem to be any other way around this warning.
    //
    // This destructor will never be called.
    virtual ~Cleanup();

    /// Emit the cleanup. For normal cleanups, this is run in the
    /// same EH context as when the cleanup was pushed, i.e. the
    /// immediately-enclosing context of the cleanup scope. For
    /// EH cleanups, this is run in a terminate context.
    ///
    /// \param IsForEHCleanup true if this is for an EH cleanup, false
    /// if for a normal cleanup.
    virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
  };
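
  // For illustration only (not part of this interface): a typical cleanup is
  // a small POD-like struct declared next to the code that pushes it. The
  // names below are hypothetical:
  //
  //   namespace {
  //     struct CallObjectDelete : EHScopeStack::Cleanup {
  //       llvm::Value *Ptr;
  //       CallObjectDelete(llvm::Value *Ptr) : Ptr(Ptr) {}
  //       void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
  //         // Emit whatever IR releases the resource, e.g. a call to a
  //         // deallocation function taking Ptr.
  //       }
  //     };
  //   }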

  /// UnconditionalCleanupN stores its N parameters and just passes
  /// them to the real cleanup function.
  template <class T, class A0>
  class UnconditionalCleanup1 : public Cleanup {
    A0 a0;
  public:
    UnconditionalCleanup1(A0 a0) : a0(a0) {}
    void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
      T::Emit(CGF, IsForEHCleanup, a0);
    }
  };

  template <class T, class A0, class A1>
  class UnconditionalCleanup2 : public Cleanup {
    A0 a0; A1 a1;
  public:
    UnconditionalCleanup2(A0 a0, A1 a1) : a0(a0), a1(a1) {}
    void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
      T::Emit(CGF, IsForEHCleanup, a0, a1);
    }
  };

  /// ConditionalCleanupN stores the saved form of its N parameters,
  /// then restores them and performs the cleanup.
  template <class T, class A0>
  class ConditionalCleanup1 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    A0_saved a0_saved;

    void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      T::Emit(CGF, IsForEHCleanup, a0);
    }

  public:
    ConditionalCleanup1(A0_saved a0)
      : a0_saved(a0) {}
  };

  template <class T, class A0, class A1>
  class ConditionalCleanup2 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    typedef typename DominatingValue<A1>::saved_type A1_saved;
    A0_saved a0_saved;
    A1_saved a1_saved;

    void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
      T::Emit(CGF, IsForEHCleanup, a0, a1);
    }

  public:
    ConditionalCleanup2(A0_saved a0, A1_saved a1)
      : a0_saved(a0), a1_saved(a1) {}
  };

private:
  // The implementation for this class is in CGException.h and
  // CGException.cpp; the definition is here because it's used as a
  // member of CodeGenFunction.

  /// The start of the scope-stack buffer, i.e. the allocated pointer
  /// for the buffer. All of these pointers are either simultaneously
  /// null or simultaneously valid.
  char *StartOfBuffer;

  /// The end of the buffer.
  char *EndOfBuffer;

  /// The first valid entry in the buffer.
  char *StartOfData;

  /// The innermost normal cleanup on the stack.
  stable_iterator InnermostNormalCleanup;

  /// The innermost EH cleanup on the stack.
  stable_iterator InnermostEHCleanup;

  /// The number of catches on the stack.
  unsigned CatchDepth;

  /// The current EH destination index. Reset to FirstEHDestIndex
  /// whenever the last EH cleanup is popped.
  unsigned NextEHDestIndex;
  enum { FirstEHDestIndex = 1 };

  /// The current set of branch fixups. A branch fixup is a jump to
  /// an as-yet unemitted label, i.e. a label for which we don't yet
  /// know the EH stack depth. Whenever we pop a cleanup, we have
  /// to thread all the current branch fixups through it.
  ///
  /// Fixups are recorded as the Use of the respective branch or
  /// switch statement. The use points to the final destination.
  /// When popping out of a cleanup, these uses are threaded through
  /// the cleanup and adjusted to point to the new cleanup.
  ///
  /// Note that branches are allowed to jump into protected scopes
  /// in certain situations; e.g. the following code is legal:
  ///   struct A { ~A(); }; // trivial ctor, non-trivial dtor
  ///   goto foo;
  ///   A a;
  ///  foo:
  ///   bar();
  llvm::SmallVector<BranchFixup, 8> BranchFixups;

  char *allocate(size_t Size);

  void *pushCleanup(CleanupKind K, size_t DataSize);

public:
  EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
                   InnermostNormalCleanup(stable_end()),
                   InnermostEHCleanup(stable_end()),
                   CatchDepth(0), NextEHDestIndex(FirstEHDestIndex) {}
  ~EHScopeStack() { delete[] StartOfBuffer; }

  // Variadic templates would make this not terrible.

  /// Push a lazily-created cleanup on the stack.
  template <class T>
  void pushCleanup(CleanupKind Kind) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T();
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0>
  void pushCleanup(CleanupKind Kind, A0 a0) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2, class A3>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2, class A3, class A4>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
    (void) Obj;
  }

  // Feel free to add more variants of the following:

  /// Push a cleanup with non-constant storage requirements on the
  /// stack. The cleanup type must provide an additional static method:
  ///   static size_t getExtraSize(size_t);
  /// The argument to this method will be the value N, which will also
  /// be passed as the first argument to the constructor.
  ///
  /// The data stored in the extra storage must obey the same
  /// restrictions as normal cleanup member data.
  ///
  /// The pointer returned from this method is valid until the cleanup
  /// stack is modified.
  template <class T, class A0, class A1, class A2>
  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
    return new (Buffer) T(N, a0, a1, a2);
  }

  /// Pops a cleanup scope off the stack. This should only be called
  /// by CodeGenFunction::PopCleanupBlock.
  void popCleanup();
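
  // For illustration only: given a cleanup class like the hypothetical
  // CallObjectDelete sketched above, client code in CodeGenFunction pushes
  // it with one of the templates above, e.g.
  //
  //   CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, Ptr);
  //
  // The object is constructed in place in the stack's buffer, and its Emit
  // method runs when the enclosing scope is popped.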

  /// Push a set of catch handlers on the stack. The catch is
  /// uninitialized and will need to have the given number of handlers
  /// set on it.
  class EHCatchScope *pushCatch(unsigned NumHandlers);

  /// Pops a catch scope off the stack.
  void popCatch();

  /// Push an exception filter on the stack.
  class EHFilterScope *pushFilter(unsigned NumFilters);

  /// Pops an exception filter off the stack.
  void popFilter();

  /// Push a terminate handler on the stack.
  void pushTerminate();

  /// Pops a terminate handler off the stack.
  void popTerminate();

  /// Determines whether the exception-scopes stack is empty.
  bool empty() const { return StartOfData == EndOfBuffer; }

  bool requiresLandingPad() const {
    return (CatchDepth || hasEHCleanups());
  }

  /// Determines whether there are any normal cleanups on the stack.
  bool hasNormalCleanups() const {
    return InnermostNormalCleanup != stable_end();
  }

  /// Returns the innermost normal cleanup on the stack, or
  /// stable_end() if there are no normal cleanups.
  stable_iterator getInnermostNormalCleanup() const {
    return InnermostNormalCleanup;
  }
  stable_iterator getInnermostActiveNormalCleanup() const; // CGException.h

  /// Determines whether there are any EH cleanups on the stack.
  bool hasEHCleanups() const {
    return InnermostEHCleanup != stable_end();
  }

  /// Returns the innermost EH cleanup on the stack, or stable_end()
  /// if there are no EH cleanups.
  stable_iterator getInnermostEHCleanup() const {
    return InnermostEHCleanup;
  }
  stable_iterator getInnermostActiveEHCleanup() const; // CGException.h

  /// An unstable reference to a scope-stack depth. Invalidated by
  /// pushes but not pops.
  class iterator;

  /// Returns an iterator pointing to the innermost EH scope.
  iterator begin() const;

  /// Returns an iterator pointing to the outermost EH scope.
  iterator end() const;

  /// Create a stable reference to the top of the EH stack. The
  /// returned reference is valid until that scope is popped off the
  /// stack.
  stable_iterator stable_begin() const {
    return stable_iterator(EndOfBuffer - StartOfData);
  }

  /// Create a stable reference to the bottom of the EH stack.
  static stable_iterator stable_end() {
    return stable_iterator(0);
  }

  /// Translates an iterator into a stable_iterator.
  stable_iterator stabilize(iterator it) const;

  /// Finds the nearest cleanup enclosing the given iterator.
  /// Returns stable_iterator::invalid() if there are no such cleanups.
  stable_iterator getEnclosingEHCleanup(iterator it) const;

  /// Turn a stable reference to a scope depth into an unstable pointer
  /// to the EH stack.
  iterator find(stable_iterator save) const;

  /// Removes the cleanup pointed to by the given stable_iterator.
  void removeCleanup(stable_iterator save);

  /// Add a branch fixup to the current cleanup scope.
  BranchFixup &addBranchFixup() {
    assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
    BranchFixups.push_back(BranchFixup());
    return BranchFixups.back();
  }

  unsigned getNumBranchFixups() const { return BranchFixups.size(); }
  BranchFixup &getBranchFixup(unsigned I) {
    assert(I < getNumBranchFixups());
    return BranchFixups[I];
  }
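
  // A sketch of how stable iterators are typically used by clients (the
  // CodeGenFunction helpers mentioned here appear later in this header):
  //
  //   EHScopeStack::stable_iterator Depth = CGF.EHStack.stable_begin();
  //   ... push cleanups, emit code ...
  //   CGF.PopCleanupBlocks(Depth);  // emit and pop everything pushed since
  //
  // A stable_iterator records a depth rather than a pointer, so it remains
  // valid across pushes, which may reallocate the buffer.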

  /// Pops lazily-removed fixups from the end of the list. This
  /// should only be called by procedures which have just popped a
  /// cleanup or resolved one or more fixups.
  void popNullFixups();

  /// Clears the branch-fixups list. This should only be called by
  /// ResolveAllBranchFixups.
  void clearFixups() { BranchFixups.clear(); }

  /// Gets the next EH destination index.
  unsigned getNextEHDestIndex() { return NextEHDestIndex++; }
};

/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
  CodeGenFunction(const CodeGenFunction&);  // DO NOT IMPLEMENT
  void operator=(const CodeGenFunction&);   // DO NOT IMPLEMENT

  friend class CGCXXABI;
public:
  /// A jump destination is an abstract label, branching to which may
  /// require a jump out through normal cleanups.
  struct JumpDest {
    JumpDest() : Block(0), ScopeDepth(), Index(0) {}
    JumpDest(llvm::BasicBlock *Block,
             EHScopeStack::stable_iterator Depth,
             unsigned Index)
      : Block(Block), ScopeDepth(Depth), Index(Index) {}

    bool isValid() const { return Block != 0; }
    llvm::BasicBlock *getBlock() const { return Block; }
    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
    unsigned getDestIndex() const { return Index; }

  private:
    llvm::BasicBlock *Block;
    EHScopeStack::stable_iterator ScopeDepth;
    unsigned Index;
  };

  /// An unwind destination is an abstract label, branching to which
  /// may require a jump out through EH cleanups.
  struct UnwindDest {
    UnwindDest() : Block(0), ScopeDepth(), Index(0) {}
    UnwindDest(llvm::BasicBlock *Block,
               EHScopeStack::stable_iterator Depth,
               unsigned Index)
      : Block(Block), ScopeDepth(Depth), Index(Index) {}

    bool isValid() const { return Block != 0; }
    llvm::BasicBlock *getBlock() const { return Block; }
    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
    unsigned getDestIndex() const { return Index; }

  private:
    llvm::BasicBlock *Block;
    EHScopeStack::stable_iterator ScopeDepth;
    unsigned Index;
  };

  CodeGenModule &CGM;  // Per-module state.
  const TargetInfo &Target;

  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
  CGBuilderTy Builder;

  /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
  /// This excludes BlockDecls.
  const Decl *CurFuncDecl;
  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
  const Decl *CurCodeDecl;
  const CGFunctionInfo *CurFnInfo;
  QualType FnRetTy;
  llvm::Function *CurFn;

  /// CurGD - The GlobalDecl for the current function being compiled.
  GlobalDecl CurGD;

  /// ReturnBlock - Unified return block.
  JumpDest ReturnBlock;

  /// ReturnValue - The temporary alloca to hold the return value. This is null
  /// iff the function has no return value.
  llvm::Value *ReturnValue;

  /// RethrowBlock - Unified rethrow block.
  UnwindDest RethrowBlock;

  /// AllocaInsertPt - This is an instruction in the entry block before which
  /// we prefer to insert allocas.
  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;

  bool CatchUndefined;

  const CodeGen::CGBlockInfo *BlockInfo;
  llvm::Value *BlockPointer;

  /// \brief A mapping from NRVO variables to the flags used to indicate
  /// when the NRVO has been applied to this variable.
  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;

  EHScopeStack EHStack;

  /// i32s containing the indexes of the cleanup destinations.
  llvm::AllocaInst *NormalCleanupDest;
  llvm::AllocaInst *EHCleanupDest;

  unsigned NextCleanupDestIndex;

  /// The exception slot. All landing pads write the current
  /// exception pointer into this alloca.
  llvm::Value *ExceptionSlot;

  /// Emits a landing pad for the current EH stack.
  llvm::BasicBlock *EmitLandingPad();

  llvm::BasicBlock *getInvokeDestImpl();

  /// Set up the last cleanup that was pushed as a conditional
  /// full-expression cleanup.
  void initFullExprCleanup();

  template <class T>
  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
    return DominatingValue<T>::save(*this, value);
  }

public:
  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
  /// rethrows.
  llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;

  // A struct holding information about a finally block's IR
  // generation. For now, doesn't actually hold anything.
  struct FinallyInfo {
  };

  FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
                                llvm::Constant *BeginCatchFn,
                                llvm::Constant *EndCatchFn,
                                llvm::Constant *RethrowFn);
  void ExitFinallyBlock(FinallyInfo &FinallyInfo);

  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression. Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class A0>
  void pushFullExprCleanup(CleanupKind kind, A0 a0) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch()) {
      typedef EHScopeStack::UnconditionalCleanup1<T, A0> CleanupType;
      return EHStack.pushCleanup<CleanupType>(kind, a0);
    }

    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);

    typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
    EHStack.pushCleanup<CleanupType>(kind, a0_saved);
    initFullExprCleanup();
  }

  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression. Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class A0, class A1>
  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch()) {
      typedef EHScopeStack::UnconditionalCleanup2<T, A0, A1> CleanupType;
      return EHStack.pushCleanup<CleanupType>(kind, a0, a1);
    }

    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);

    typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
    initFullExprCleanup();
  }
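
  // Sketch of intended use (the cleanup class here is hypothetical): a
  // temporary created inside a conditionally-evaluated subexpression gets a
  // full-expression cleanup whose argument is saved so that it still
  // dominates the point where the cleanup is finally emitted:
  //
  //   pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup, Addr);
  //
  // where the hypothetical DestroyTemporary provides
  //   static void Emit(CodeGenFunction &CGF, bool IsForEH, llvm::Value *Addr);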

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object destructor of an object of the given type at the
  /// given address. Does nothing if T is not a C++ class type with a
  /// non-trivial destructor.
  void PushDestructorCleanup(QualType T, llvm::Value *Addr);

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object variant of the given destructor on the object at
  /// the given address.
  void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
                             llvm::Value *Addr);

  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
  /// process all branch fixups.
  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);

  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
  /// The block cannot be reactivated. Pops it if it's the top of the
  /// stack.
  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);

  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
  /// Cannot be used to resurrect a deactivated cleanup.
  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);

  /// \brief Enters a new scope for capturing cleanups, all of which
  /// will be executed once the scope is exited.
  class RunCleanupsScope {
    CodeGenFunction& CGF;
    EHScopeStack::stable_iterator CleanupStackDepth;
    bool OldDidCallStackSave;
    bool PerformCleanup;

    RunCleanupsScope(const RunCleanupsScope &);             // DO NOT IMPLEMENT
    RunCleanupsScope &operator=(const RunCleanupsScope &);  // DO NOT IMPLEMENT

  public:
    /// \brief Enter a new cleanup scope.
    explicit RunCleanupsScope(CodeGenFunction &CGF)
      : CGF(CGF), PerformCleanup(true)
    {
      CleanupStackDepth = CGF.EHStack.stable_begin();
      OldDidCallStackSave = CGF.DidCallStackSave;
      CGF.DidCallStackSave = false;
    }

    /// \brief Exit this cleanup scope, emitting any accumulated
    /// cleanups.
    ~RunCleanupsScope() {
      if (PerformCleanup) {
        CGF.DidCallStackSave = OldDidCallStackSave;
        CGF.PopCleanupBlocks(CleanupStackDepth);
      }
    }

    /// \brief Determine whether this scope requires any cleanups.
    bool requiresCleanups() const {
      return CGF.EHStack.stable_begin() != CleanupStackDepth;
    }

    /// \brief Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    void ForceCleanup() {
      assert(PerformCleanup && "Already forced cleanup");
      CGF.DidCallStackSave = OldDidCallStackSave;
      CGF.PopCleanupBlocks(CleanupStackDepth);
      PerformCleanup = false;
    }
  };


  /// PopCleanupBlocks - Takes the old cleanup stack size and emits
  /// the cleanup blocks that have been added.
  void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);

  void ResolveBranchFixups(llvm::BasicBlock *Target);
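
  // Typical use of RunCleanupsScope, as a sketch: a statement emitter opens a
  // scope for a lexical scope, emits the contained code, and lets the
  // destructor pop whatever cleanups were pushed along the way.
  //
  //   {
  //     RunCleanupsScope Scope(*this);
  //     // ... emit the declarations and statements of the compound
  //     // statement; destructor cleanups accumulate on EHStack ...
  //   }  // ~RunCleanupsScope emits and pops them here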

  /// The given basic block lies in the current EH scope, but may be a
  /// target of a potentially scope-crossing jump; get a stable handle
  /// to which we can perform this jump later.
  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
    return JumpDest(Target,
                    EHStack.getInnermostNormalCleanup(),
                    NextCleanupDestIndex++);
  }

  /// The given basic block lies in the current EH scope, but may be a
  /// target of a potentially scope-crossing jump; get a stable handle
  /// to which we can perform this jump later.
  JumpDest getJumpDestInCurrentScope(llvm::StringRef Name = llvm::StringRef()) {
    return getJumpDestInCurrentScope(createBasicBlock(Name));
  }

  /// EmitBranchThroughCleanup - Emit a branch from the current insert
  /// block through the normal cleanup handling code (if any) and then
  /// on to \arg Dest.
  void EmitBranchThroughCleanup(JumpDest Dest);

  /// EmitBranchThroughEHCleanup - Emit a branch from the current
  /// insert block through the EH cleanup handling code (if any) and
  /// then on to \arg Dest.
  void EmitBranchThroughEHCleanup(UnwindDest Dest);

  /// getRethrowDest - Returns the unified outermost-scope rethrow
  /// destination.
  UnwindDest getRethrowDest();

  /// An object to manage conditionally-evaluated expressions.
  class ConditionalEvaluation {
    llvm::BasicBlock *StartBB;

  public:
    ConditionalEvaluation(CodeGenFunction &CGF)
      : StartBB(CGF.Builder.GetInsertBlock()) {}

    void begin(CodeGenFunction &CGF) {
      assert(CGF.OutermostConditional != this);
      if (!CGF.OutermostConditional)
        CGF.OutermostConditional = this;
    }

    void end(CodeGenFunction &CGF) {
      assert(CGF.OutermostConditional != 0);
      if (CGF.OutermostConditional == this)
        CGF.OutermostConditional = 0;
    }

    /// Returns a block which will be executed prior to each
    /// evaluation of the conditional code.
    llvm::BasicBlock *getStartingBlock() const {
      return StartBB;
    }
  };

  /// isInConditionalBranch - Return true if we're currently emitting
  /// one branch or the other of a conditional expression.
  bool isInConditionalBranch() const { return OutermostConditional != 0; }

  /// An RAII object to record that we're evaluating a statement
  /// expression.
  class StmtExprEvaluation {
    CodeGenFunction &CGF;

    /// We have to save the outermost conditional: cleanups in a
    /// statement expression aren't conditional just because the
    /// StmtExpr is.
    ConditionalEvaluation *SavedOutermostConditional;

  public:
    StmtExprEvaluation(CodeGenFunction &CGF)
      : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
      CGF.OutermostConditional = 0;
    }

    ~StmtExprEvaluation() {
      CGF.OutermostConditional = SavedOutermostConditional;
      CGF.EnsureInsertPoint();
    }
  };

  /// An object which temporarily prevents a value from being
  /// destroyed by aggressive peephole optimizations that assume that
  /// all uses of a value have been realized in the IR.
  class PeepholeProtection {
    llvm::Instruction *Inst;
    friend class CodeGenFunction;

  public:
    PeepholeProtection() : Inst(0) {}
  };

  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
  class OpaqueValueMapping {
    CodeGenFunction &CGF;
    const OpaqueValueExpr *OpaqueValue;
    bool BoundLValue;
    CodeGenFunction::PeepholeProtection Protection;

  public:
    static bool shouldBindAsLValue(const Expr *expr) {
      return expr->isGLValue() || expr->getType()->isRecordType();
    }
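
    // For illustration, a sketch only (OVE and UnderlyingExpr are
    // placeholder names): a client that has already emitted a value binds it
    // for the lifetime of an RAII object, and any code emitted inside that
    // lifetime can look the OpaqueValueExpr back up.
    //
    //   OpaqueValueMapping binding(CGF, OVE, CGF.EmitLValue(UnderlyingExpr));
    //   ... emit code that refers to OVE ...
    //   // the mapping is removed when 'binding' is destroyed or pop()ed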
862 /// 863 OpaqueValueMapping(CodeGenFunction &CGF, 864 const AbstractConditionalOperator *op) : CGF(CGF) { 865 if (isa<ConditionalOperator>(op)) { 866 OpaqueValue = 0; 867 BoundLValue = false; 868 return; 869 } 870 871 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op); 872 init(e->getOpaqueValue(), e->getCommon()); 873 } 874 875 OpaqueValueMapping(CodeGenFunction &CGF, 876 const OpaqueValueExpr *opaqueValue, 877 LValue lvalue) 878 : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(true) { 879 assert(opaqueValue && "no opaque value expression!"); 880 assert(shouldBindAsLValue(opaqueValue)); 881 initLValue(lvalue); 882 } 883 884 OpaqueValueMapping(CodeGenFunction &CGF, 885 const OpaqueValueExpr *opaqueValue, 886 RValue rvalue) 887 : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(false) { 888 assert(opaqueValue && "no opaque value expression!"); 889 assert(!shouldBindAsLValue(opaqueValue)); 890 initRValue(rvalue); 891 } 892 893 void pop() { 894 assert(OpaqueValue && "mapping already popped!"); 895 popImpl(); 896 OpaqueValue = 0; 897 } 898 899 ~OpaqueValueMapping() { 900 if (OpaqueValue) popImpl(); 901 } 902 903 private: 904 void popImpl() { 905 if (BoundLValue) 906 CGF.OpaqueLValues.erase(OpaqueValue); 907 else { 908 CGF.OpaqueRValues.erase(OpaqueValue); 909 CGF.unprotectFromPeepholes(Protection); 910 } 911 } 912 913 void init(const OpaqueValueExpr *ov, const Expr *e) { 914 OpaqueValue = ov; 915 BoundLValue = shouldBindAsLValue(ov); 916 assert(BoundLValue == shouldBindAsLValue(e) 917 && "inconsistent expression value kinds!"); 918 if (BoundLValue) 919 initLValue(CGF.EmitLValue(e)); 920 else 921 initRValue(CGF.EmitAnyExpr(e)); 922 } 923 924 void initLValue(const LValue &lv) { 925 CGF.OpaqueLValues.insert(std::make_pair(OpaqueValue, lv)); 926 } 927 928 void initRValue(const RValue &rv) { 929 // Work around an extremely aggressive peephole optimization in 930 // EmitScalarConversion which assumes that all other uses of a 931 // value are extant. 932 Protection = CGF.protectFromPeepholes(rv); 933 CGF.OpaqueRValues.insert(std::make_pair(OpaqueValue, rv)); 934 } 935 }; 936 937 /// getByrefValueFieldNumber - Given a declaration, returns the LLVM field 938 /// number that holds the value. 939 unsigned getByRefValueLLVMField(const ValueDecl *VD) const; 940 941 /// BuildBlockByrefAddress - Computes address location of the 942 /// variable which is declared as __block. 943 llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr, 944 const VarDecl *V); 945private: 946 CGDebugInfo *DebugInfo; 947 948 /// IndirectBranch - The first time an indirect goto is seen we create a block 949 /// with an indirect branch. Every time we see the address of a label taken, 950 /// we add the label to the indirect goto. Every subsequent indirect goto is 951 /// codegen'd as a jump to the IndirectBranch's basic block. 952 llvm::IndirectBrInst *IndirectBranch; 953 954 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C 955 /// decls. 956 typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy; 957 DeclMapTy LocalDeclMap; 958 959 /// LabelMap - This keeps track of the LLVM basic block for each C label. 960 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap; 961 962 // BreakContinueStack - This keeps track of where break and continue 963 // statements should jump to. 
  struct BreakContinue {
    BreakContinue(JumpDest Break, JumpDest Continue)
      : BreakBlock(Break), ContinueBlock(Continue) {}

    JumpDest BreakBlock;
    JumpDest ContinueBlock;
  };
  llvm::SmallVector<BreakContinue, 8> BreakContinueStack;

  /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
  /// if the current context is not in a switch.
  llvm::SwitchInst *SwitchInsn;

  /// CaseRangeBlock - This block holds the condition check for the last case
  /// statement range in the current switch instruction.
  llvm::BasicBlock *CaseRangeBlock;

  /// OpaqueLValues - Keeps track of the current set of opaque value
  /// expressions.
  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;

  // VLASizeMap - This keeps track of the associated size for each VLA type.
  // We track this by the size expression rather than the type itself because
  // in certain situations, like a const qualifier applied to a VLA typedef,
  // multiple VLA types can share the same size expression.
  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
  // enter/leave scopes.
  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;

  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
  /// calling llvm.stacksave for multiple VLAs in the same scope.
  bool DidCallStackSave;

  /// A block containing a single 'unreachable' instruction. Created
  /// lazily by getUnreachableBlock().
  llvm::BasicBlock *UnreachableBlock;

  /// CXXThisDecl - When generating code for a C++ member function,
  /// this will hold the implicit 'this' declaration.
  ImplicitParamDecl *CXXThisDecl;
  llvm::Value *CXXThisValue;

  /// CXXVTTDecl - When generating code for a base object constructor or
  /// base object destructor with virtual bases, this will hold the implicit
  /// VTT parameter.
  ImplicitParamDecl *CXXVTTDecl;
  llvm::Value *CXXVTTValue;

  /// OutermostConditional - Points to the outermost active
  /// conditional control. This is used so that we know if a
  /// temporary should be destroyed conditionally.
  ConditionalEvaluation *OutermostConditional;


  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
  /// type as well as the field number that contains the actual data.
  llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
                                              unsigned> > ByRefValueInfo;

  llvm::BasicBlock *TerminateLandingPad;
  llvm::BasicBlock *TerminateHandler;
  llvm::BasicBlock *TrapBB;

public:
  CodeGenFunction(CodeGenModule &cgm);

  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
  ASTContext &getContext() const;
  CGDebugInfo *getDebugInfo() { return DebugInfo; }

  const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }

  /// Returns a pointer to the function's exception object slot, which
  /// is assigned in every landing pad.
  llvm::Value *getExceptionSlot();

  llvm::Value *getNormalCleanupDestSlot();
  llvm::Value *getEHCleanupDestSlot();

  llvm::BasicBlock *getUnreachableBlock() {
    if (!UnreachableBlock) {
      UnreachableBlock = createBasicBlock("unreachable");
      new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
    }
    return UnreachableBlock;
  }

  llvm::BasicBlock *getInvokeDest() {
    if (!EHStack.requiresLandingPad()) return 0;
    return getInvokeDestImpl();
  }

  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }

  //===--------------------------------------------------------------------===//
  //                                  Objective-C
  //===--------------------------------------------------------------------===//

  void GenerateObjCMethod(const ObjCMethodDecl *OMD);

  void StartObjCMethod(const ObjCMethodDecl *MD,
                       const ObjCContainerDecl *CD);

  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
                          const ObjCPropertyImplDecl *PID);
  void GenerateObjCGetterBody(ObjCIvarDecl *Ivar, bool IsAtomic, bool IsStrong);
  void GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
                                    ObjCIvarDecl *Ivar);

  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                  ObjCMethodDecl *MD, bool ctor);

  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
  /// for the given property.
  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
                          const ObjCPropertyImplDecl *PID);
  bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
  bool IvarTypeWithAggrGCObjects(QualType Ty);

  //===--------------------------------------------------------------------===//
  //                                  Block Bits
  //===--------------------------------------------------------------------===//

  llvm::Value *EmitBlockLiteral(const BlockExpr *);
  llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
                                           const CGBlockInfo &Info,
                                           const llvm::StructType *,
                                           llvm::Constant *BlockVarLayout);

  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
                                        const CGBlockInfo &Info,
                                        const Decl *OuterFuncDecl,
                                        const DeclMapTy &ldm);

  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);

  llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *,
                                                  BlockFieldFlags flags,
                                                  const VarDecl *BD);
  llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
                                                     BlockFieldFlags flags,
                                                     const VarDecl *BD);

  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);

  llvm::Value *LoadBlockStruct() {
    assert(BlockPointer && "no block pointer set!");
    return BlockPointer;
  }

  void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
  void AllocateBlockDecl(const BlockDeclRefExpr *E);
  llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
    return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
  }
  llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
  const llvm::Type *BuildByRefType(const VarDecl *var);

  void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
  void StartFunction(GlobalDecl GD, QualType RetTy,
                     llvm::Function *Fn,
                     const FunctionArgList &Args,
                     SourceLocation StartLoc);
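
  // The overall flow of function emission, as a sketch: GenerateCode sets up
  // per-function state, StartFunction emits the prologue, one of the
  // Emit*Body methods below emits the body, and FinishFunction emits the
  // epilogue and the unified return block.
  //
  //   CodeGenFunction CGF(CGM);
  //   CGF.StartFunction(GD, RetTy, Fn, Args, Loc);
  //   CGF.EmitFunctionBody(Args);
  //   CGF.FinishFunction(EndLoc);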
  void EmitConstructorBody(FunctionArgList &Args);
  void EmitDestructorBody(FunctionArgList &Args);
  void EmitFunctionBody(FunctionArgList &Args);

  /// EmitReturnBlock - Emit the unified return block, trying to avoid its
  /// emission when possible.
  void EmitReturnBlock();

  /// FinishFunction - Complete IR generation of the current function. It is
  /// legal to call this function even if there is no current insertion point.
  void FinishFunction(SourceLocation EndLoc=SourceLocation());

  /// GenerateThunk - Generate a thunk for the given method.
  void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);

  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
                        FunctionArgList &Args);

  /// InitializeVTablePointer - Initialize the vtable pointer of the given
  /// subobject.
  ///
  void InitializeVTablePointer(BaseSubobject Base,
                               const CXXRecordDecl *NearestVBase,
                               uint64_t OffsetFromNearestVBase,
                               llvm::Constant *VTable,
                               const CXXRecordDecl *VTableClass);

  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
  void InitializeVTablePointers(BaseSubobject Base,
                                const CXXRecordDecl *NearestVBase,
                                uint64_t OffsetFromNearestVBase,
                                bool BaseIsNonVirtualPrimaryBase,
                                llvm::Constant *VTable,
                                const CXXRecordDecl *VTableClass,
                                VisitedVirtualBasesSetTy& VBases);

  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);

  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
  /// to by This.
  llvm::Value *GetVTablePtr(llvm::Value *This, const llvm::Type *Ty);

  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
  /// given phase of destruction for a destructor. The end result
  /// should call destructors on members and base classes in reverse
  /// order of their construction.
  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);

  /// ShouldInstrumentFunction - Return true if the current function should be
  /// instrumented with __cyg_profile_func_* calls.
  bool ShouldInstrumentFunction();

  /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
  /// instrumentation function with the current function and the call site, if
  /// function instrumentation is enabled.
  void EmitFunctionInstrumentation(const char *Fn);

  /// EmitMCountInstrumentation - Emit call to .mcount.
  void EmitMCountInstrumentation();

  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
  /// arguments for the given function. This is also responsible for naming the
  /// LLVM function arguments.
  void EmitFunctionProlog(const CGFunctionInfo &FI,
                          llvm::Function *Fn,
                          const FunctionArgList &Args);

  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
  /// given temporary.
  void EmitFunctionEpilog(const CGFunctionInfo &FI);

  /// EmitStartEHSpec - Emit the start of the exception spec.
  void EmitStartEHSpec(const Decl *D);

  /// EmitEndEHSpec - Emit the end of the exception spec.
  void EmitEndEHSpec(const Decl *D);

  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
  llvm::BasicBlock *getTerminateLandingPad();

  /// getTerminateHandler - Return a handler (not a landing pad, just
  /// a catch handler) that just calls terminate. This is used when
  /// a terminate scope encloses a try.
  llvm::BasicBlock *getTerminateHandler();

  const llvm::Type *ConvertTypeForMem(QualType T);
  const llvm::Type *ConvertType(QualType T);
  const llvm::Type *ConvertType(const TypeDecl *T) {
    return ConvertType(getContext().getTypeDeclType(T));
  }

  /// LoadObjCSelf - Load the value of self. This function is only valid while
  /// generating code for an Objective-C method.
  llvm::Value *LoadObjCSelf();

  /// TypeOfSelfObject - Return type of object that this self represents.
  QualType TypeOfSelfObject();

  /// hasAggregateLLVMType - Return true if the specified AST type will map into
  /// an aggregate LLVM type or is void.
  static bool hasAggregateLLVMType(QualType T);

  /// createBasicBlock - Create an LLVM basic block.
  llvm::BasicBlock *createBasicBlock(llvm::StringRef name = "",
                                     llvm::Function *parent = 0,
                                     llvm::BasicBlock *before = 0) {
#ifdef NDEBUG
    return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
#else
    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
#endif
  }

  /// getJumpDestForLabel - Return the JumpDest that the specified label
  /// maps to.
  JumpDest getJumpDestForLabel(const LabelDecl *S);

  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
  /// another basic block, simplify it. This assumes that no other code could
  /// potentially reference the basic block.
  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);

  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
  /// adding a fall-through branch from the current insert block if
  /// necessary. It is legal to call this function even if there is no current
  /// insertion point.
  ///
  /// IsFinished - If true, indicates that the caller has finished emitting
  /// branches to the given block and does not expect to emit code into it. This
  /// means the block can be ignored if it is unreachable.
  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);

  /// EmitBranch - Emit a branch to the specified basic block from the current
  /// insert block, taking care to avoid creation of branches from dummy
  /// blocks. It is legal to call this function even if there is no current
  /// insertion point.
  ///
  /// This function clears the current insertion point. The caller should follow
  /// calls to this function with calls to Emit*Block prior to generating new
  /// code.
  void EmitBranch(llvm::BasicBlock *Block);

  /// HaveInsertPoint - True if an insertion point is defined. If not, this
  /// indicates that the current code being emitted is unreachable.
  bool HaveInsertPoint() const {
    return Builder.GetInsertBlock() != 0;
  }
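
  // A sketch of how these primitives combine when lowering control flow, for
  // instance the 'then' half of an if statement (CondV stands for an
  // already-emitted i1 condition; details of condition emission omitted):
  //
  //   llvm::BasicBlock *ThenBB = createBasicBlock("if.then");
  //   llvm::BasicBlock *EndBB  = createBasicBlock("if.end");
  //   Builder.CreateCondBr(CondV, ThenBB, EndBB);
  //   EmitBlock(ThenBB);
  //   // ... emit the 'then' statement ...
  //   EmitBranch(EndBB);
  //   EmitBlock(EndBB, /*IsFinished*/ true);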

  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
  /// emitted IR has a place to go. Note that by definition, if this function
  /// creates a block then that block is unreachable; callers may do better to
  /// detect when no insertion point is defined and simply skip IR generation.
  void EnsureInsertPoint() {
    if (!HaveInsertPoint())
      EmitBlock(createBasicBlock());
  }

  /// ErrorUnsupported - Print out an error that codegen doesn't support the
  /// specified stmt yet.
  void ErrorUnsupported(const Stmt *S, const char *Type,
                        bool OmitOnError=false);

  //===--------------------------------------------------------------------===//
  //                                  Helpers
  //===--------------------------------------------------------------------===//

  LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
    return LValue::MakeAddr(V, T, Alignment, getContext(),
                            CGM.getTBAAInfo(T));
  }

  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
  /// block. The caller is responsible for setting an appropriate alignment on
  /// the alloca.
  llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
                                     const llvm::Twine &Name = "tmp");

  /// InitTempAlloca - Provide an initial value for the given alloca.
  void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);

  /// CreateIRTemp - Create a temporary IR object of the given type, with
  /// appropriate alignment. This routine should only be used when a temporary
  /// value needs to be stored into an alloca (for example, to avoid explicit
  /// PHI construction), but the type is the IR type, not the type appropriate
  /// for storing in memory.
  llvm::AllocaInst *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");

  /// CreateMemTemp - Create a temporary memory object of the given type, with
  /// appropriate alignment.
  llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");

  /// CreateAggTemp - Create a temporary memory object for the given
  /// aggregate type.
  AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
    return AggValueSlot::forAddr(CreateMemTemp(T, Name), false, false);
  }

  /// Emit a cast to void* in the appropriate address space.
  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);

  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
  /// expression and compare the result against zero, returning an Int1Ty value.
  llvm::Value *EvaluateExprAsBool(const Expr *E);

  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
  void EmitIgnoredExpr(const Expr *E);

  /// EmitAnyExpr - Emit code to compute the specified expression which can have
  /// any type. The result is returned as an RValue struct. If this is an
  /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
  /// the result should be returned.
  ///
  /// \param IgnoreResult - True if the resulting value isn't used.
  RValue EmitAnyExpr(const Expr *E,
                     AggValueSlot AggSlot = AggValueSlot::ignored(),
                     bool IgnoreResult = false);

  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
  // or the value of the expression, depending on how va_list is defined.
  llvm::Value *EmitVAListRef(const Expr *E);

  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
  /// always be accessible even if no aggregate location is provided.
  RValue EmitAnyExprToTemp(const Expr *E);
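
  // For illustration, materializing an arbitrary expression into a fresh
  // temporary typically combines the helpers above with EmitAnyExprToMem
  // below (a sketch; E is assumed to be some already-parsed expression of
  // type Ty):
  //
  //   llvm::AllocaInst *Temp = CreateMemTemp(Ty, "agg.tmp");
  //   EmitAnyExprToMem(E, Temp, /*IsLocationVolatile*/ false,
  //                    /*IsInitializer*/ true);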

  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
  /// arbitrary expression into the given memory location.
  void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
                        bool IsLocationVolatile,
                        bool IsInitializer);

  /// EmitAggregateCopy - Emit an aggregate copy.
  ///
  /// \param isVolatile - True iff either the source or the destination is
  /// volatile.
  void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                         QualType EltTy, bool isVolatile=false);

  /// StartBlock - Start a new block named N. If the insert block is a dummy
  /// block then reuse it.
  void StartBlock(const char *N);

  /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
  llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
    return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
  }

  /// GetAddrOfLocalVar - Return the address of a local variable.
  llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
    llvm::Value *Res = LocalDeclMap[VD];
    assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
    return Res;
  }

  /// getOpaqueLValueMapping - Given an opaque value expression (which
  /// must be mapped to an l-value), return its mapping.
  const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
    assert(OpaqueValueMapping::shouldBindAsLValue(e));

    llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
      it = OpaqueLValues.find(e);
    assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
    return it->second;
  }

  /// getOpaqueRValueMapping - Given an opaque value expression (which
  /// must be mapped to an r-value), return its mapping.
  const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
    assert(!OpaqueValueMapping::shouldBindAsLValue(e));

    llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
      it = OpaqueRValues.find(e);
    assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
    return it->second;
  }

  /// getAccessedFieldNo - Given an encoded value and a result number, return
  /// the input field number being accessed.
  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);

  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
  llvm::BasicBlock *GetIndirectGotoBlock();

  /// EmitNullInitialization - Generate code to set a value of the given type to
  /// null. If the type contains data member pointers, they will be initialized
  /// to -1 in accordance with the Itanium C++ ABI.
  void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);

  // EmitVAArg - Generate code to get an argument from the passed in pointer
  // and update it accordingly. The return value is a pointer to the argument.
  // FIXME: We should be able to get rid of this method and use the va_arg
  // instruction in LLVM instead once it works well enough.
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);

  /// EmitVLASize - Generate code for any VLA size expressions that might occur
  /// in a variably modified type. If Ty is a VLA, will return the value that
  /// corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
  ///
  /// This function can be called with a null (unreachable) insert point.
  llvm::Value *EmitVLASize(QualType Ty);
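
  // Sketch of the intended protocol (D and VAT are placeholder names): when a
  // declaration with a variably modified type is emitted, EmitVLASize is
  // called once to evaluate and cache the size expressions in VLASizeMap;
  // later references recover the cached value through GetVLASize below.
  //
  //   EmitVLASize(D.getType());             // populate the map
  //   ...
  //   llvm::Value *Size = GetVLASize(VAT);  // VAT: the VariableArrayType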

  // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
  // of a variable length array type.
  llvm::Value *GetVLASize(const VariableArrayType *);

  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
  /// generating code for a C++ member function.
  llvm::Value *LoadCXXThis() {
    assert(CXXThisValue && "no 'this' value for this function");
    return CXXThisValue;
  }

  /// LoadCXXVTT - Load the VTT parameter for base constructors/destructors
  /// that have virtual bases.
  llvm::Value *LoadCXXVTT() {
    assert(CXXVTTValue && "no VTT value for this function");
    return CXXVTTValue;
  }

  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
  /// complete class to the given direct base.
  llvm::Value *
  GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
                                        const CXXRecordDecl *Derived,
                                        const CXXRecordDecl *Base,
                                        bool BaseIsVirtual);

  /// GetAddressOfBaseClass - This function will add the necessary delta to the
  /// load of 'this' and return the address of the base class.
  llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
                                     const CXXRecordDecl *Derived,
                                     CastExpr::path_const_iterator PathBegin,
                                     CastExpr::path_const_iterator PathEnd,
                                     bool NullCheckValue);

  llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
                                        const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                        CastExpr::path_const_iterator PathEnd,
                                        bool NullCheckValue);

  llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl);

  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                      CXXCtorType CtorType,
                                      const FunctionArgList &Args);
  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
                              bool ForVirtualBase, llvm::Value *This,
                              CallExpr::const_arg_iterator ArgBeg,
                              CallExpr::const_arg_iterator ArgEnd);

  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                      llvm::Value *This, llvm::Value *Src,
                                      CallExpr::const_arg_iterator ArgBeg,
                                      CallExpr::const_arg_iterator ArgEnd);

  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
                                  const ConstantArrayType *ArrayTy,
                                  llvm::Value *ArrayPtr,
                                  CallExpr::const_arg_iterator ArgBeg,
                                  CallExpr::const_arg_iterator ArgEnd,
                                  bool ZeroInitialization = false);

  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
                                  llvm::Value *NumElements,
                                  llvm::Value *ArrayPtr,
                                  CallExpr::const_arg_iterator ArgBeg,
                                  CallExpr::const_arg_iterator ArgEnd,
                                  bool ZeroInitialization = false);

  void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
                                 const ArrayType *Array,
                                 llvm::Value *This);

  void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
                                 llvm::Value *NumElements,
                                 llvm::Value *This);

  llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
                                                  const ArrayType *Array,
                                                  llvm::Value *This);

  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
                             bool ForVirtualBase, llvm::Value *This);

  void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
                               llvm::Value *NumElements);

  void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);

  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);

  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
                      QualType DeleteTy);
llvm::Value *Ptr, 1527 QualType DeleteTy); 1528 1529 llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E); 1530 llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE); 1531 1532 void EmitCheck(llvm::Value *, unsigned Size); 1533 1534 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 1535 bool isInc, bool isPre); 1536 ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, 1537 bool isInc, bool isPre); 1538 //===--------------------------------------------------------------------===// 1539 // Declaration Emission 1540 //===--------------------------------------------------------------------===// 1541 1542 /// EmitDecl - Emit a declaration. 1543 /// 1544 /// This function can be called with a null (unreachable) insert point. 1545 void EmitDecl(const Decl &D); 1546 1547 /// EmitVarDecl - Emit a local variable declaration. 1548 /// 1549 /// This function can be called with a null (unreachable) insert point. 1550 void EmitVarDecl(const VarDecl &D); 1551 1552 typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, 1553 llvm::Value *Address); 1554 1555 /// EmitAutoVarDecl - Emit an auto variable declaration. 1556 /// 1557 /// This function can be called with a null (unreachable) insert point. 1558 void EmitAutoVarDecl(const VarDecl &D); 1559 1560 class AutoVarEmission { 1561 friend class CodeGenFunction; 1562 1563 const VarDecl *Variable; 1564 1565 /// The alignment of the variable. 1566 CharUnits Alignment; 1567 1568 /// The address of the alloca. Null if the variable was emitted 1569 /// as a global constant. 1570 llvm::Value *Address; 1571 1572 llvm::Value *NRVOFlag; 1573 1574 /// True if the variable is a __block variable. 1575 bool IsByRef; 1576 1577 /// True if the variable is of aggregate type and has a constant 1578 /// initializer. 1579 bool IsConstantAggregate; 1580 1581 struct Invalid {}; 1582 AutoVarEmission(Invalid) : Variable(0) {} 1583 1584 AutoVarEmission(const VarDecl &variable) 1585 : Variable(&variable), Address(0), NRVOFlag(0), 1586 IsByRef(false), IsConstantAggregate(false) {} 1587 1588 bool wasEmittedAsGlobal() const { return Address == 0; } 1589 1590 public: 1591 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } 1592 1593 /// Returns the address of the object within this declaration. 1594 /// Note that this does not chase the forwarding pointer for 1595 /// __block decls. 1596 llvm::Value *getObjectAddress(CodeGenFunction &CGF) const { 1597 if (!IsByRef) return Address; 1598 1599 return CGF.Builder.CreateStructGEP(Address, 1600 CGF.getByRefValueLLVMField(Variable), 1601 Variable->getNameAsString()); 1602 } 1603 }; 1604 AutoVarEmission EmitAutoVarAlloca(const VarDecl &var); 1605 void EmitAutoVarInit(const AutoVarEmission &emission); 1606 void EmitAutoVarCleanups(const AutoVarEmission &emission); 1607 1608 void EmitStaticVarDecl(const VarDecl &D, 1609 llvm::GlobalValue::LinkageTypes Linkage); 1610 1611 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl. 1612 void EmitParmDecl(const VarDecl &D, llvm::Value *Arg); 1613 1614 /// protectFromPeepholes - Protect a value that we're intending to 1615 /// store to the side, but which will probably be used later, from 1616 /// aggressive peepholing optimizations that might delete it. 1617 /// 1618 /// Pass the result to unprotectFromPeepholes to declare that 1619 /// protection is no longer required. 
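  ///
  /// A minimal usage sketch (hypothetical; 'rv' and the surrounding emission
  /// are assumed, not part of this header):
  ///   PeepholeProtection Protection = protectFromPeepholes(rv);
  ///   // ... emit the intervening code that must not delete rv ...
  ///   unprotectFromPeepholes(Protection);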
1620 /// 1621 /// There's no particular reason why this shouldn't apply to 1622 /// l-values, it's just that no existing peepholes work on pointers. 1623 PeepholeProtection protectFromPeepholes(RValue rvalue); 1624 void unprotectFromPeepholes(PeepholeProtection protection); 1625 1626 //===--------------------------------------------------------------------===// 1627 // Statement Emission 1628 //===--------------------------------------------------------------------===// 1629 1630 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info. 1631 void EmitStopPoint(const Stmt *S); 1632 1633 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call 1634 /// this function even if there is no current insertion point. 1635 /// 1636 /// This function may clear the current insertion point; callers should use 1637 /// EnsureInsertPoint if they wish to subsequently generate code without first 1638 /// calling EmitBlock, EmitBranch, or EmitStmt. 1639 void EmitStmt(const Stmt *S); 1640 1641 /// EmitSimpleStmt - Try to emit a "simple" statement which does not 1642 /// necessarily require an insertion point or debug information; typically 1643 /// because the statement amounts to a jump or a container of other 1644 /// statements. 1645 /// 1646 /// \return True if the statement was handled. 1647 bool EmitSimpleStmt(const Stmt *S); 1648 1649 RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false, 1650 AggValueSlot AVS = AggValueSlot::ignored()); 1651 1652 /// EmitLabel - Emit the block for the given label. It is legal to call this 1653 /// function even if there is no current insertion point. 1654 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt. 1655 1656 void EmitLabelStmt(const LabelStmt &S); 1657 void EmitGotoStmt(const GotoStmt &S); 1658 void EmitIndirectGotoStmt(const IndirectGotoStmt &S); 1659 void EmitIfStmt(const IfStmt &S); 1660 void EmitWhileStmt(const WhileStmt &S); 1661 void EmitDoStmt(const DoStmt &S); 1662 void EmitForStmt(const ForStmt &S); 1663 void EmitReturnStmt(const ReturnStmt &S); 1664 void EmitDeclStmt(const DeclStmt &S); 1665 void EmitBreakStmt(const BreakStmt &S); 1666 void EmitContinueStmt(const ContinueStmt &S); 1667 void EmitSwitchStmt(const SwitchStmt &S); 1668 void EmitDefaultStmt(const DefaultStmt &S); 1669 void EmitCaseStmt(const CaseStmt &S); 1670 void EmitCaseStmtRange(const CaseStmt &S); 1671 void EmitAsmStmt(const AsmStmt &S); 1672 1673 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S); 1674 void EmitObjCAtTryStmt(const ObjCAtTryStmt &S); 1675 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S); 1676 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S); 1677 1678 llvm::Constant *getUnwindResumeOrRethrowFn(); 1679 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); 1680 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); 1681 1682 void EmitCXXTryStmt(const CXXTryStmt &S); 1683 1684 //===--------------------------------------------------------------------===// 1685 // LValue Expression Emission 1686 //===--------------------------------------------------------------------===// 1687 1688 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type. 1689 RValue GetUndefRValue(QualType Ty); 1690 1691 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E 1692 /// and issue an ErrorUnsupported style diagnostic (using the 1693 /// provided Name). 
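  ///
  /// A hypothetical emitter for an expression form with no codegen support yet
  /// might simply fall back to (sketch; the diagnostic text is illustrative):
  ///   return EmitUnsupportedRValue(E, "unsupported expression");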
1694 RValue EmitUnsupportedRValue(const Expr *E, 1695 const char *Name); 1696 1697 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue 1698 /// an ErrorUnsupported style diagnostic (using the provided Name). 1699 LValue EmitUnsupportedLValue(const Expr *E, 1700 const char *Name); 1701 1702 /// EmitLValue - Emit code to compute a designator that specifies the location 1703 /// of the expression. 1704 /// 1705 /// This can return one of two things: a simple address or a bitfield 1706 /// reference. In either case, the LLVM Value* in the LValue structure is 1707 /// guaranteed to be an LLVM pointer type. 1708 /// 1709 /// If this returns a bitfield reference, nothing about the pointee type of 1710 /// the LLVM value is known: For example, it may not be a pointer to an 1711 /// integer. 1712 /// 1713 /// If this returns a normal address, and if the lvalue's C type is fixed 1714 /// size, this method guarantees that the returned pointer type will point to 1715 /// an LLVM type of the same size as the lvalue's type. If the lvalue has a 1716 /// variable length type, this is not possible. 1717 /// 1718 LValue EmitLValue(const Expr *E); 1719 1720 /// EmitCheckedLValue - Same as EmitLValue but additionally we generate 1721 /// checking code to guard against undefined behavior. This is only 1722 /// suitable when we know that the address will be used to access the 1723 /// object. 1724 LValue EmitCheckedLValue(const Expr *E); 1725 1726 /// EmitToMemory - Change a scalar value from its value 1727 /// representation to its in-memory representation. 1728 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty); 1729 1730 /// EmitFromMemory - Change a scalar value from its memory 1731 /// representation to its value representation. 1732 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty); 1733 1734 /// EmitLoadOfScalar - Load a scalar value from an address, taking 1735 /// care to appropriately convert from the memory representation to 1736 /// the LLVM value representation. 1737 llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, 1738 unsigned Alignment, QualType Ty, 1739 llvm::MDNode *TBAAInfo = 0); 1740 1741 /// EmitStoreOfScalar - Store a scalar value to an address, taking 1742 /// care to appropriately convert from the LLVM value representation to 1743 /// the memory representation. 1744 void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, 1745 bool Volatile, unsigned Alignment, QualType Ty, 1746 llvm::MDNode *TBAAInfo = 0); 1747 1748 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, 1749 /// this method emits the address of the lvalue, then loads the result as an 1750 /// rvalue, returning the rvalue. 1751 RValue EmitLoadOfLValue(LValue V, QualType LVType); 1752 RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType); 1753 RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType); 1754 RValue EmitLoadOfPropertyRefLValue(LValue LV, 1755 ReturnValueSlot Return = ReturnValueSlot()); 1756 1757 /// EmitStoreThroughLValue - Store the specified rvalue into the specified 1758 /// lvalue, where both are guaranteed to have the same type, and that type 1759 /// is 'Ty'.
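  ///
  /// A minimal usage sketch (hypothetical; 'DstExpr' and 'SrcExpr' are assumed
  /// expressions of the same scalar type, not part of this header):
  ///   LValue Dst = EmitLValue(DstExpr);
  ///   LValue Src = EmitLValue(SrcExpr);
  ///   RValue RV  = EmitLoadOfLValue(Src, SrcExpr->getType());
  ///   EmitStoreThroughLValue(RV, Dst, DstExpr->getType());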
1760 void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty); 1761 void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst, 1762 QualType Ty); 1763 void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst); 1764 1765 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same 1766 /// constraints as EmitStoreThroughLValue. 1767 /// 1768 /// \param Result [out] - If non-null, this will be set to a Value* for the 1769 /// bit-field contents after the store, appropriate for use as the result of 1770 /// an assignment to the bit-field. 1771 void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty, 1772 llvm::Value **Result=0); 1773 1774 /// Emit an l-value for an assignment (simple or compound) of complex type. 1775 LValue EmitComplexAssignmentLValue(const BinaryOperator *E); 1776 LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E); 1777 1778 // Note: only available for agg return types 1779 LValue EmitBinaryOperatorLValue(const BinaryOperator *E); 1780 LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E); 1781 // Note: only available for agg return types 1782 LValue EmitCallExprLValue(const CallExpr *E); 1783 // Note: only available for agg return types 1784 LValue EmitVAArgExprLValue(const VAArgExpr *E); 1785 LValue EmitDeclRefLValue(const DeclRefExpr *E); 1786 LValue EmitStringLiteralLValue(const StringLiteral *E); 1787 LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E); 1788 LValue EmitPredefinedLValue(const PredefinedExpr *E); 1789 LValue EmitUnaryOpLValue(const UnaryOperator *E); 1790 LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E); 1791 LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E); 1792 LValue EmitMemberExpr(const MemberExpr *E); 1793 LValue EmitObjCIsaExpr(const ObjCIsaExpr *E); 1794 LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E); 1795 LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E); 1796 LValue EmitCastLValue(const CastExpr *E); 1797 LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E); 1798 LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e); 1799 1800 llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface, 1801 const ObjCIvarDecl *Ivar); 1802 LValue EmitLValueForAnonRecordField(llvm::Value* Base, 1803 const IndirectFieldDecl* Field, 1804 unsigned CVRQualifiers); 1805 LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field, 1806 unsigned CVRQualifiers); 1807 1808 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that 1809 /// if the Field is a reference, this will return the address of the reference 1810 /// and not the address of the value stored in the reference.
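  ///
  /// For example (hypothetical sketch; 'ThisPtr' and a FieldDecl 'Field' for a
  /// reference member 'T &Ref' are assumed): when emitting the member's
  /// initializer,
  ///   LValue Slot = EmitLValueForFieldInitialization(ThisPtr, Field, 0);
  /// yields the location of the reference itself, whereas
  ///   EmitLValueForField(ThisPtr, Field, 0)
  /// would instead yield the object the reference is bound to.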
1811 LValue EmitLValueForFieldInitialization(llvm::Value* Base, 1812 const FieldDecl* Field, 1813 unsigned CVRQualifiers); 1814 1815 LValue EmitLValueForIvar(QualType ObjectTy, 1816 llvm::Value* Base, const ObjCIvarDecl *Ivar, 1817 unsigned CVRQualifiers); 1818 1819 LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field, 1820 unsigned CVRQualifiers); 1821 1822 LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E); 1823 1824 LValue EmitCXXConstructLValue(const CXXConstructExpr *E); 1825 LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E); 1826 LValue EmitExprWithCleanupsLValue(const ExprWithCleanups *E); 1827 LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E); 1828 1829 LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E); 1830 LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E); 1831 LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E); 1832 LValue EmitStmtExprLValue(const StmtExpr *E); 1833 LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E); 1834 LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E); 1835 void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init); 1836 1837 //===--------------------------------------------------------------------===// 1838 // Scalar Expression Emission 1839 //===--------------------------------------------------------------------===// 1840 1841 /// EmitCall - Generate a call of the given function, expecting the given 1842 /// result type, and using the given argument list which specifies both the 1843 /// LLVM arguments and the types they were derived from. 1844 /// 1845 /// \param TargetDecl - If given, the decl of the function in a direct call; 1846 /// used to set attributes on the call (noreturn, etc.). 1847 RValue EmitCall(const CGFunctionInfo &FnInfo, 1848 llvm::Value *Callee, 1849 ReturnValueSlot ReturnValue, 1850 const CallArgList &Args, 1851 const Decl *TargetDecl = 0, 1852 llvm::Instruction **callOrInvoke = 0); 1853 1854 RValue EmitCall(QualType FnType, llvm::Value *Callee, 1855 ReturnValueSlot ReturnValue, 1856 CallExpr::const_arg_iterator ArgBeg, 1857 CallExpr::const_arg_iterator ArgEnd, 1858 const Decl *TargetDecl = 0); 1859 RValue EmitCallExpr(const CallExpr *E, 1860 ReturnValueSlot ReturnValue = ReturnValueSlot()); 1861 1862 llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, 1863 llvm::Value * const *ArgBegin, 1864 llvm::Value * const *ArgEnd, 1865 const llvm::Twine &Name = ""); 1866 1867 llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This, 1868 const llvm::Type *Ty); 1869 llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type, 1870 llvm::Value *This, const llvm::Type *Ty); 1871 llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD, 1872 NestedNameSpecifier *Qual, 1873 const llvm::Type *Ty); 1874 1875 llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD, 1876 CXXDtorType Type, 1877 const CXXRecordDecl *RD); 1878 1879 RValue EmitCXXMemberCall(const CXXMethodDecl *MD, 1880 llvm::Value *Callee, 1881 ReturnValueSlot ReturnValue, 1882 llvm::Value *This, 1883 llvm::Value *VTT, 1884 CallExpr::const_arg_iterator ArgBeg, 1885 CallExpr::const_arg_iterator ArgEnd); 1886 RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, 1887 ReturnValueSlot ReturnValue); 1888 RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, 1889 ReturnValueSlot ReturnValue); 1890 1891 RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, 1892 const CXXMethodDecl *MD, 1893 ReturnValueSlot 
ReturnValue); 1894 1895 1896 RValue EmitBuiltinExpr(const FunctionDecl *FD, 1897 unsigned BuiltinID, const CallExpr *E); 1898 1899 RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue); 1900 1901 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call 1902 /// is unhandled by the current target. 1903 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E); 1904 1905 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E); 1906 llvm::Value *EmitNeonCall(llvm::Function *F, 1907 llvm::SmallVectorImpl<llvm::Value*> &O, 1908 const char *name, 1909 unsigned shift = 0, bool rightshift = false); 1910 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx); 1911 llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty, 1912 bool negateForRightShift); 1913 1914 llvm::Value *BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops); 1915 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); 1916 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E); 1917 1918 llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E); 1919 llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E); 1920 llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E); 1921 RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, 1922 ReturnValueSlot Return = ReturnValueSlot()); 1923 1924 /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in 1925 /// expression. Will emit a temporary variable if E is not an LValue. 1926 RValue EmitReferenceBindingToExpr(const Expr* E, 1927 const NamedDecl *InitializedDecl); 1928 1929 //===--------------------------------------------------------------------===// 1930 // Expression Emission 1931 //===--------------------------------------------------------------------===// 1932 1933 // Expressions are broken into three classes: scalar, complex, aggregate. 1934 1935 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM 1936 /// scalar type, returning the result. 1937 llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false); 1938 1939 /// EmitScalarConversion - Emit a conversion from the specified type to the 1940 /// specified destination type, both of which are LLVM scalar types. 1941 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy, 1942 QualType DstTy); 1943 1944 /// EmitComplexToScalarConversion - Emit a conversion from the specified 1945 /// complex type to the specified destination type, where the destination type 1946 /// is an LLVM scalar type. 1947 llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, 1948 QualType DstTy); 1949 1950 1951 /// EmitAggExpr - Emit the computation of the specified expression 1952 /// of aggregate type. The result is computed into the given slot, 1953 /// which may be null to indicate that the value is not needed. 1954 void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false); 1955 1956 /// EmitAggExprToLValue - Emit the computation of the specified expression of 1957 /// aggregate type into a temporary LValue. 1958 LValue EmitAggExprToLValue(const Expr *E); 1959 1960 /// EmitGCMemmoveCollectable - Emit special API for structs with object 1961 /// pointers. 1962 void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr, 1963 QualType Ty); 1964 1965 /// EmitComplexExpr - Emit the computation of the specified expression of 1966 /// complex type, returning the result. 
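  ///
  /// A minimal usage sketch (hypothetical; 'E' is assumed to have complex type
  /// and 'DestAddr' to be a suitably typed address):
  ///   ComplexPairTy Val = EmitComplexExpr(E);
  ///   StoreComplexToAddr(Val, DestAddr, /*DestIsVolatile=*/false);
  /// which is roughly what EmitComplexExprIntoAddr does in one step.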
1967 ComplexPairTy EmitComplexExpr(const Expr *E, 1968 bool IgnoreReal = false, 1969 bool IgnoreImag = false); 1970 1971 /// EmitComplexExprIntoAddr - Emit the computation of the specified expression 1972 /// of complex type, storing into the specified Value*. 1973 void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr, 1974 bool DestIsVolatile); 1975 1976 /// StoreComplexToAddr - Store a complex number into the specified address. 1977 void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr, 1978 bool DestIsVolatile); 1979 /// LoadComplexFromAddr - Load a complex number from the specified address. 1980 ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile); 1981 1982 /// CreateStaticVarDecl - Create a zero-initialized LLVM global for 1983 /// a static local variable. 1984 llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D, 1985 const char *Separator, 1986 llvm::GlobalValue::LinkageTypes Linkage); 1987 1988 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the 1989 /// global variable that has already been created for it. If the initializer 1990 /// has a different type than GV does, this may free GV and return a different 1991 /// one. Otherwise it just returns GV. 1992 llvm::GlobalVariable * 1993 AddInitializerToStaticVarDecl(const VarDecl &D, 1994 llvm::GlobalVariable *GV); 1995 1996 1997 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++ 1998 /// variable with global storage. 1999 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr); 2000 2001 /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr 2002 /// with the C++ runtime so that its destructor will be called at exit. 2003 void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn, 2004 llvm::Constant *DeclPtr); 2005 2006 /// Emit code in this function to perform a guarded variable 2007 /// initialization. Guarded initializations are used when it's not 2008 /// possible to prove that an initialization will be done exactly 2009 /// once, e.g. with a static local variable or a static data member 2010 /// of a class template. 2011 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr); 2012 2013 /// GenerateCXXGlobalInitFunc - Generates code for initializing global 2014 /// variables. 2015 void GenerateCXXGlobalInitFunc(llvm::Function *Fn, 2016 llvm::Constant **Decls, 2017 unsigned NumDecls); 2018 2019 /// GenerateCXXGlobalDtorFunc - Generates code for destroying global 2020 /// variables. 2021 void GenerateCXXGlobalDtorFunc(llvm::Function *Fn, 2022 const std::vector<std::pair<llvm::WeakVH, 2023 llvm::Constant*> > &DtorsAndObjects); 2024 2025 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D, 2026 llvm::GlobalVariable *Addr); 2027 2028 void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest); 2029 2030 void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src, 2031 const Expr *Exp); 2032 2033 RValue EmitExprWithCleanups(const ExprWithCleanups *E, 2034 AggValueSlot Slot = AggValueSlot::ignored()); 2035 2036 void EmitCXXThrowExpr(const CXXThrowExpr *E); 2037 2038 //===--------------------------------------------------------------------===// 2039 // Internal Helpers 2040 //===--------------------------------------------------------------------===// 2041 2042 /// ContainsLabel - Return true if the statement contains a label. If the 2043 /// statement is not executed normally, the absence of a label means that 2044 /// we can simply remove the code.
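  ///
  /// A minimal sketch of how a statement emitter might use this together with
  /// ConstantFoldsToSimpleInteger (hypothetical; 'S' is assumed to be an IfStmt):
  ///   if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
  ///     const Stmt *Executed = S.getThen(), *Skipped = S.getElse();
  ///     if (Cond == -1) std::swap(Executed, Skipped);
  ///     if (!ContainsLabel(Skipped)) {
  ///       if (Executed) EmitStmt(Executed);
  ///       return;                        // the dead branch is never emitted
  ///     }
  ///   }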
2045 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false); 2046 2047 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 2048 /// to a constant, or if it does but contains a label, return 0. If it 2049 /// constant folds to 'true' and does not contain a label, return 1; if it 2050 /// constant folds to 'false' and does not contain a label, return -1. 2051 int ConstantFoldsToSimpleInteger(const Expr *Cond); 2052 2053 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an 2054 /// if statement) to the specified blocks. Based on the condition, this might 2055 /// try to simplify the codegen of the conditional based on the branch. 2056 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, 2057 llvm::BasicBlock *FalseBlock); 2058 2059 /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll 2060 /// generate a branch around the created basic block as necessary. 2061 llvm::BasicBlock *getTrapBB(); 2062 2063 /// EmitCallArg - Emit a single call argument. 2064 RValue EmitCallArg(const Expr *E, QualType ArgType); 2065 2066 /// EmitDelegateCallArg - We are performing a delegate call; that 2067 /// is, the current function is delegating to another one. Produce 2068 /// an r-value suitable for passing the given parameter. 2069 RValue EmitDelegateCallArg(const VarDecl *Param); 2070 2071private: 2072 void EmitReturnOfRValue(RValue RV, QualType Ty); 2073 2074 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty 2075 /// from function arguments into \arg Dst. See ABIArgInfo::Expand. 2076 /// 2077 /// \param AI - The first function argument of the expansion. 2078 /// \return The argument following the last expanded function 2079 /// argument. 2080 llvm::Function::arg_iterator 2081 ExpandTypeFromArgs(QualType Ty, LValue Dst, 2082 llvm::Function::arg_iterator AI); 2083 2084 /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg 2085 /// Ty, into individual arguments on the provided vector \arg Args. See 2086 /// ABIArgInfo::Expand. 2087 void ExpandTypeToArgs(QualType Ty, RValue Src, 2088 llvm::SmallVector<llvm::Value*, 16> &Args); 2089 2090 llvm::Value* EmitAsmInput(const AsmStmt &S, 2091 const TargetInfo::ConstraintInfo &Info, 2092 const Expr *InputExpr, std::string &ConstraintStr); 2093 2094 llvm::Value* EmitAsmInputLValue(const AsmStmt &S, 2095 const TargetInfo::ConstraintInfo &Info, 2096 LValue InputValue, QualType InputType, 2097 std::string &ConstraintStr); 2098 2099 /// EmitCallArgs - Emit call arguments for a function. 2100 /// The CallArgTypeInfo parameter is used for iterating over the known 2101 /// argument types of the function being called.
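  ///
  /// For example (hypothetical sketch; 'FPT' is the callee's FunctionProtoType,
  /// or null for an unprototyped callee, and 'CE' is the CallExpr):
  ///   CallArgList Args;
  ///   EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());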
2102 template<typename T> 2103 void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo, 2104 CallExpr::const_arg_iterator ArgBeg, 2105 CallExpr::const_arg_iterator ArgEnd) { 2106 CallExpr::const_arg_iterator Arg = ArgBeg; 2107 2108 // First, use the argument types that the type info knows about 2109 if (CallArgTypeInfo) { 2110 for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(), 2111 E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) { 2112 assert(Arg != ArgEnd && "Running over edge of argument list!"); 2113 QualType ArgType = *I; 2114#ifndef NDEBUG 2115 QualType ActualArgType = Arg->getType(); 2116 if (ArgType->isPointerType() && ActualArgType->isPointerType()) { 2117 QualType ActualBaseType = 2118 ActualArgType->getAs<PointerType>()->getPointeeType(); 2119 QualType ArgBaseType = 2120 ArgType->getAs<PointerType>()->getPointeeType(); 2121 if (ArgBaseType->isVariableArrayType()) { 2122 if (const VariableArrayType *VAT = 2123 getContext().getAsVariableArrayType(ActualBaseType)) { 2124 if (!VAT->getSizeExpr()) 2125 ActualArgType = ArgType; 2126 } 2127 } 2128 } 2129 assert(getContext().getCanonicalType(ArgType.getNonReferenceType()). 2130 getTypePtr() == 2131 getContext().getCanonicalType(ActualArgType).getTypePtr() && 2132 "type mismatch in call argument!"); 2133#endif 2134 Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType), 2135 ArgType)); 2136 } 2137 2138 // Either we've emitted all the call args, or we have a call to a 2139 // variadic function. 2140 assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) && 2141 "Extra arguments in non-variadic function!"); 2142 2143 } 2144 2145 // If we still have any arguments, emit them using the type of the argument. 2146 for (; Arg != ArgEnd; ++Arg) { 2147 QualType ArgType = Arg->getType(); 2148 Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType), 2149 ArgType)); 2150 } 2151 } 2152 2153 const TargetCodeGenInfo &getTargetHooks() const { 2154 return CGM.getTargetCodeGenInfo(); 2155 } 2156 2157 void EmitDeclMetadata(); 2158}; 2159 2160/// Helper class with most of the code for saving a value for a 2161/// conditional expression cleanup. 2162struct DominatingLLVMValue { 2163 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type; 2164 2165 /// Answer whether the given value needs extra work to be saved. 2166 static bool needsSaving(llvm::Value *value) { 2167 // If it's not an instruction, we don't need to save. 2168 if (!isa<llvm::Instruction>(value)) return false; 2169 2170 // If it's an instruction in the entry block, we don't need to save. 2171 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent(); 2172 return (block != &block->getParent()->getEntryBlock()); 2173 } 2174 2175 /// Try to save the given value. 2176 static saved_type save(CodeGenFunction &CGF, llvm::Value *value) { 2177 if (!needsSaving(value)) return saved_type(value, false); 2178 2179 // Otherwise we need an alloca. 2180 llvm::Value *alloca = 2181 CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save"); 2182 CGF.Builder.CreateStore(value, alloca); 2183 2184 return saved_type(alloca, true); 2185 } 2186 2187 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) { 2188 if (!value.getInt()) return value.getPointer(); 2189 return CGF.Builder.CreateLoad(value.getPointer()); 2190 } 2191}; 2192 2193/// A partial specialization of DominatingValue for llvm::Values that 2194/// might be llvm::Instructions. 
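///
/// A minimal usage sketch (hypothetical; 'CGF' is a CodeGenFunction and 'V' an
/// llvm::Value emitted in a block that may not dominate the later use):
///   typedef DominatingValue<llvm::Value*> DV;
///   DV::saved_type Saved = DV::save(CGF, V);
///   // ... later, possibly in a different basic block ...
///   llvm::Value *Reloaded = DV::restore(CGF, Saved);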
2195template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue { 2196 typedef T *type; 2197 static type restore(CodeGenFunction &CGF, saved_type value) { 2198 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value)); 2199 } 2200}; 2201 2202/// A specialization of DominatingValue for RValue. 2203template <> struct DominatingValue<RValue> { 2204 typedef RValue type; 2205 class saved_type { 2206 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral, 2207 AggregateAddress, ComplexAddress }; 2208 2209 llvm::Value *Value; 2210 Kind K; 2211 saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {} 2212 2213 public: 2214 static bool needsSaving(RValue value); 2215 static saved_type save(CodeGenFunction &CGF, RValue value); 2216 RValue restore(CodeGenFunction &CGF); 2217 2218 // implementations in CGExprCXX.cpp 2219 }; 2220 2221 static bool needsSaving(type value) { 2222 return saved_type::needsSaving(value); 2223 } 2224 static saved_type save(CodeGenFunction &CGF, type value) { 2225 return saved_type::save(CGF, value); 2226 } 2227 static type restore(CodeGenFunction &CGF, saved_type value) { 2228 return value.restore(CGF); 2229 } 2230}; 2231 2232} // end namespace CodeGen 2233} // end namespace clang 2234 2235#endif 2236