1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This is the internal per-function state used for llvm translation.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15#define CLANG_CODEGEN_CODEGENFUNCTION_H
16
17#include "clang/AST/Type.h"
18#include "clang/AST/ExprCXX.h"
19#include "clang/AST/ExprObjC.h"
20#include "clang/AST/CharUnits.h"
21#include "clang/Frontend/CodeGenOptions.h"
22#include "clang/Basic/ABI.h"
23#include "clang/Basic/TargetInfo.h"
24#include "llvm/ADT/ArrayRef.h"
25#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/Support/ValueHandle.h"
28#include "llvm/Support/Debug.h"
29#include "CodeGenModule.h"
30#include "CGBuilder.h"
31#include "CGDebugInfo.h"
32#include "CGValue.h"
33
34namespace llvm {
35  class BasicBlock;
36  class LLVMContext;
37  class MDNode;
38  class Module;
39  class SwitchInst;
40  class Twine;
41  class Value;
42  class CallSite;
43}
44
45namespace clang {
46  class ASTContext;
47  class BlockDecl;
48  class CXXDestructorDecl;
49  class CXXForRangeStmt;
50  class CXXTryStmt;
51  class Decl;
52  class LabelDecl;
53  class EnumConstantDecl;
54  class FunctionDecl;
55  class FunctionProtoType;
56  class LabelStmt;
57  class ObjCContainerDecl;
58  class ObjCInterfaceDecl;
59  class ObjCIvarDecl;
60  class ObjCMethodDecl;
61  class ObjCImplementationDecl;
62  class ObjCPropertyImplDecl;
63  class TargetInfo;
64  class TargetCodeGenInfo;
65  class VarDecl;
66  class ObjCForCollectionStmt;
67  class ObjCAtTryStmt;
68  class ObjCAtThrowStmt;
69  class ObjCAtSynchronizedStmt;
70  class ObjCAutoreleasePoolStmt;
71
72namespace CodeGen {
73  class CodeGenTypes;
74  class CGFunctionInfo;
75  class CGRecordLayout;
76  class CGBlockInfo;
77  class CGCXXABI;
78  class BlockFlags;
79  class BlockFieldFlags;
80
81/// A branch fixup.  These are required when emitting a goto to a
82/// label which hasn't been emitted yet.  The goto is optimistically
83/// emitted as a branch to the basic block for the label, and (if it
84/// occurs in a scope with non-trivial cleanups) a fixup is added to
85/// the innermost cleanup.  When a (normal) cleanup is popped, any
86/// unresolved fixups in that scope are threaded through the cleanup.
87struct BranchFixup {
88  /// The block containing the terminator which needs to be modified
89  /// into a switch if this fixup is resolved into the current scope.
90  /// If null, LatestBranch points directly to the destination.
91  llvm::BasicBlock *OptimisticBranchBlock;
92
93  /// The ultimate destination of the branch.
94  ///
95  /// This can be set to null to indicate that this fixup was
96  /// successfully resolved.
97  llvm::BasicBlock *Destination;
98
99  /// The destination index value.
100  unsigned DestinationIndex;
101
102  /// The initial branch of the fixup.
103  llvm::BranchInst *InitialBranch;
104};
105
106template <class T> struct InvariantValue {
107  typedef T type;
108  typedef T saved_type;
109  static bool needsSaving(type value) { return false; }
110  static saved_type save(CodeGenFunction &CGF, type value) { return value; }
111  static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
112};
113
114/// A metaprogramming class for ensuring that a value will dominate an
115/// arbitrary position in a function.
116template <class T> struct DominatingValue : InvariantValue<T> {};
117
118template <class T, bool mightBeInstruction =
119            llvm::is_base_of<llvm::Value, T>::value &&
120            !llvm::is_base_of<llvm::Constant, T>::value &&
121            !llvm::is_base_of<llvm::BasicBlock, T>::value>
122struct DominatingPointer;
123template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
124// template <class T> struct DominatingPointer<T,true> at end of file
125
126template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
127
128enum CleanupKind {
129  EHCleanup = 0x1,
130  NormalCleanup = 0x2,
131  NormalAndEHCleanup = EHCleanup | NormalCleanup,
132
133  InactiveCleanup = 0x4,
134  InactiveEHCleanup = EHCleanup | InactiveCleanup,
135  InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
136  InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
137};
138
139/// A stack of scopes which respond to exceptions, including cleanups
140/// and catch blocks.
141class EHScopeStack {
142public:
143  /// A saved depth on the scope stack.  This is necessary because
144  /// pushing scopes onto the stack invalidates iterators.
145  class stable_iterator {
146    friend class EHScopeStack;
147
148    /// Offset from StartOfData to EndOfBuffer.
149    ptrdiff_t Size;
150
151    stable_iterator(ptrdiff_t Size) : Size(Size) {}
152
153  public:
154    static stable_iterator invalid() { return stable_iterator(-1); }
155    stable_iterator() : Size(-1) {}
156
157    bool isValid() const { return Size >= 0; }
158
159    /// Returns true if this scope encloses I.
160    /// Returns false if I is invalid.
161    /// This scope must be valid.
162    bool encloses(stable_iterator I) const { return Size <= I.Size; }
163
164    /// Returns true if this scope strictly encloses I: that is,
165    /// if it encloses I and is not I.
166    /// Returns false is I is invalid.
167    /// This scope must be valid.
168    bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
169
170    friend bool operator==(stable_iterator A, stable_iterator B) {
171      return A.Size == B.Size;
172    }
173    friend bool operator!=(stable_iterator A, stable_iterator B) {
174      return A.Size != B.Size;
175    }
176  };
177
178  /// Information for lazily generating a cleanup.  Subclasses must be
179  /// POD-like: cleanups will not be destructed, and they will be
180  /// allocated on the cleanup stack and freely copied and moved
181  /// around.
182  ///
183  /// Cleanup implementations should generally be declared in an
184  /// anonymous namespace.
185  class Cleanup {
186    // Anchor the construction vtable.
187    virtual void anchor();
188  public:
189    /// Generation flags.
190    class Flags {
191      enum {
192        F_IsForEH             = 0x1,
193        F_IsNormalCleanupKind = 0x2,
194        F_IsEHCleanupKind     = 0x4
195      };
196      unsigned flags;
197
198    public:
199      Flags() : flags(0) {}
200
201      /// isForEH - true if the current emission is for an EH cleanup.
202      bool isForEHCleanup() const { return flags & F_IsForEH; }
203      bool isForNormalCleanup() const { return !isForEHCleanup(); }
204      void setIsForEHCleanup() { flags |= F_IsForEH; }
205
206      bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
207      void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
208
209      /// isEHCleanupKind - true if the cleanup was pushed as an EH
210      /// cleanup.
211      bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
212      void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
213    };
214
215    // Provide a virtual destructor to suppress a very common warning
216    // that unfortunately cannot be suppressed without this.  Cleanups
217    // should not rely on this destructor ever being called.
218    virtual ~Cleanup() {}
219
220    /// Emit the cleanup.  For normal cleanups, this is run in the
221    /// same EH context as when the cleanup was pushed, i.e. the
222    /// immediately-enclosing context of the cleanup scope.  For
223    /// EH cleanups, this is run in a terminate context.
224    ///
225    // \param IsForEHCleanup true if this is for an EH cleanup, false
226    ///  if for a normal cleanup.
227    virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
228  };
229
230  /// ConditionalCleanupN stores the saved form of its N parameters,
231  /// then restores them and performs the cleanup.
232  template <class T, class A0>
233  class ConditionalCleanup1 : public Cleanup {
234    typedef typename DominatingValue<A0>::saved_type A0_saved;
235    A0_saved a0_saved;
236
237    void Emit(CodeGenFunction &CGF, Flags flags) {
238      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
239      T(a0).Emit(CGF, flags);
240    }
241
242  public:
243    ConditionalCleanup1(A0_saved a0)
244      : a0_saved(a0) {}
245  };
246
247  template <class T, class A0, class A1>
248  class ConditionalCleanup2 : public Cleanup {
249    typedef typename DominatingValue<A0>::saved_type A0_saved;
250    typedef typename DominatingValue<A1>::saved_type A1_saved;
251    A0_saved a0_saved;
252    A1_saved a1_saved;
253
254    void Emit(CodeGenFunction &CGF, Flags flags) {
255      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
256      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
257      T(a0, a1).Emit(CGF, flags);
258    }
259
260  public:
261    ConditionalCleanup2(A0_saved a0, A1_saved a1)
262      : a0_saved(a0), a1_saved(a1) {}
263  };
264
265  template <class T, class A0, class A1, class A2>
266  class ConditionalCleanup3 : public Cleanup {
267    typedef typename DominatingValue<A0>::saved_type A0_saved;
268    typedef typename DominatingValue<A1>::saved_type A1_saved;
269    typedef typename DominatingValue<A2>::saved_type A2_saved;
270    A0_saved a0_saved;
271    A1_saved a1_saved;
272    A2_saved a2_saved;
273
274    void Emit(CodeGenFunction &CGF, Flags flags) {
275      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
276      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
277      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
278      T(a0, a1, a2).Emit(CGF, flags);
279    }
280
281  public:
282    ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
283      : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
284  };
285
286  template <class T, class A0, class A1, class A2, class A3>
287  class ConditionalCleanup4 : public Cleanup {
288    typedef typename DominatingValue<A0>::saved_type A0_saved;
289    typedef typename DominatingValue<A1>::saved_type A1_saved;
290    typedef typename DominatingValue<A2>::saved_type A2_saved;
291    typedef typename DominatingValue<A3>::saved_type A3_saved;
292    A0_saved a0_saved;
293    A1_saved a1_saved;
294    A2_saved a2_saved;
295    A3_saved a3_saved;
296
297    void Emit(CodeGenFunction &CGF, Flags flags) {
298      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
299      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
300      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
301      A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
302      T(a0, a1, a2, a3).Emit(CGF, flags);
303    }
304
305  public:
306    ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
307      : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
308  };
309
310private:
311  // The implementation for this class is in CGException.h and
312  // CGException.cpp; the definition is here because it's used as a
313  // member of CodeGenFunction.
314
315  /// The start of the scope-stack buffer, i.e. the allocated pointer
316  /// for the buffer.  All of these pointers are either simultaneously
317  /// null or simultaneously valid.
318  char *StartOfBuffer;
319
320  /// The end of the buffer.
321  char *EndOfBuffer;
322
323  /// The first valid entry in the buffer.
324  char *StartOfData;
325
326  /// The innermost normal cleanup on the stack.
327  stable_iterator InnermostNormalCleanup;
328
329  /// The innermost EH scope on the stack.
330  stable_iterator InnermostEHScope;
331
332  /// The current set of branch fixups.  A branch fixup is a jump to
333  /// an as-yet unemitted label, i.e. a label for which we don't yet
334  /// know the EH stack depth.  Whenever we pop a cleanup, we have
335  /// to thread all the current branch fixups through it.
336  ///
337  /// Fixups are recorded as the Use of the respective branch or
338  /// switch statement.  The use points to the final destination.
339  /// When popping out of a cleanup, these uses are threaded through
340  /// the cleanup and adjusted to point to the new cleanup.
341  ///
342  /// Note that branches are allowed to jump into protected scopes
343  /// in certain situations;  e.g. the following code is legal:
344  ///     struct A { ~A(); }; // trivial ctor, non-trivial dtor
345  ///     goto foo;
346  ///     A a;
347  ///    foo:
348  ///     bar();
349  SmallVector<BranchFixup, 8> BranchFixups;
350
351  char *allocate(size_t Size);
352
353  void *pushCleanup(CleanupKind K, size_t DataSize);
354
355public:
356  EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
357                   InnermostNormalCleanup(stable_end()),
358                   InnermostEHScope(stable_end()) {}
359  ~EHScopeStack() { delete[] StartOfBuffer; }
360
361  // Variadic templates would make this not terrible.
362
363  /// Push a lazily-created cleanup on the stack.
364  template <class T>
365  void pushCleanup(CleanupKind Kind) {
366    void *Buffer = pushCleanup(Kind, sizeof(T));
367    Cleanup *Obj = new(Buffer) T();
368    (void) Obj;
369  }
370
371  /// Push a lazily-created cleanup on the stack.
372  template <class T, class A0>
373  void pushCleanup(CleanupKind Kind, A0 a0) {
374    void *Buffer = pushCleanup(Kind, sizeof(T));
375    Cleanup *Obj = new(Buffer) T(a0);
376    (void) Obj;
377  }
378
379  /// Push a lazily-created cleanup on the stack.
380  template <class T, class A0, class A1>
381  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
382    void *Buffer = pushCleanup(Kind, sizeof(T));
383    Cleanup *Obj = new(Buffer) T(a0, a1);
384    (void) Obj;
385  }
386
387  /// Push a lazily-created cleanup on the stack.
388  template <class T, class A0, class A1, class A2>
389  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
390    void *Buffer = pushCleanup(Kind, sizeof(T));
391    Cleanup *Obj = new(Buffer) T(a0, a1, a2);
392    (void) Obj;
393  }
394
395  /// Push a lazily-created cleanup on the stack.
396  template <class T, class A0, class A1, class A2, class A3>
397  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
398    void *Buffer = pushCleanup(Kind, sizeof(T));
399    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
400    (void) Obj;
401  }
402
403  /// Push a lazily-created cleanup on the stack.
404  template <class T, class A0, class A1, class A2, class A3, class A4>
405  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
406    void *Buffer = pushCleanup(Kind, sizeof(T));
407    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
408    (void) Obj;
409  }
410
411  // Feel free to add more variants of the following:
412
413  /// Push a cleanup with non-constant storage requirements on the
414  /// stack.  The cleanup type must provide an additional static method:
415  ///   static size_t getExtraSize(size_t);
416  /// The argument to this method will be the value N, which will also
417  /// be passed as the first argument to the constructor.
418  ///
419  /// The data stored in the extra storage must obey the same
420  /// restrictions as normal cleanup member data.
421  ///
422  /// The pointer returned from this method is valid until the cleanup
423  /// stack is modified.
424  template <class T, class A0, class A1, class A2>
425  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
426    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
427    return new (Buffer) T(N, a0, a1, a2);
428  }
429
430  /// Pops a cleanup scope off the stack.  This is private to CGCleanup.cpp.
431  void popCleanup();
432
433  /// Push a set of catch handlers on the stack.  The catch is
434  /// uninitialized and will need to have the given number of handlers
435  /// set on it.
436  class EHCatchScope *pushCatch(unsigned NumHandlers);
437
438  /// Pops a catch scope off the stack.  This is private to CGException.cpp.
439  void popCatch();
440
441  /// Push an exceptions filter on the stack.
442  class EHFilterScope *pushFilter(unsigned NumFilters);
443
444  /// Pops an exceptions filter off the stack.
445  void popFilter();
446
447  /// Push a terminate handler on the stack.
448  void pushTerminate();
449
450  /// Pops a terminate handler off the stack.
451  void popTerminate();
452
453  /// Determines whether the exception-scopes stack is empty.
454  bool empty() const { return StartOfData == EndOfBuffer; }
455
456  bool requiresLandingPad() const {
457    return InnermostEHScope != stable_end();
458  }
459
460  /// Determines whether there are any normal cleanups on the stack.
461  bool hasNormalCleanups() const {
462    return InnermostNormalCleanup != stable_end();
463  }
464
465  /// Returns the innermost normal cleanup on the stack, or
466  /// stable_end() if there are no normal cleanups.
467  stable_iterator getInnermostNormalCleanup() const {
468    return InnermostNormalCleanup;
469  }
470  stable_iterator getInnermostActiveNormalCleanup() const;
471
472  stable_iterator getInnermostEHScope() const {
473    return InnermostEHScope;
474  }
475
476  stable_iterator getInnermostActiveEHScope() const;
477
478  /// An unstable reference to a scope-stack depth.  Invalidated by
479  /// pushes but not pops.
480  class iterator;
481
482  /// Returns an iterator pointing to the innermost EH scope.
483  iterator begin() const;
484
485  /// Returns an iterator pointing to the outermost EH scope.
486  iterator end() const;
487
488  /// Create a stable reference to the top of the EH stack.  The
489  /// returned reference is valid until that scope is popped off the
490  /// stack.
491  stable_iterator stable_begin() const {
492    return stable_iterator(EndOfBuffer - StartOfData);
493  }
494
495  /// Create a stable reference to the bottom of the EH stack.
496  static stable_iterator stable_end() {
497    return stable_iterator(0);
498  }
499
500  /// Translates an iterator into a stable_iterator.
501  stable_iterator stabilize(iterator it) const;
502
503  /// Turn a stable reference to a scope depth into a unstable pointer
504  /// to the EH stack.
505  iterator find(stable_iterator save) const;
506
507  /// Removes the cleanup pointed to by the given stable_iterator.
508  void removeCleanup(stable_iterator save);
509
510  /// Add a branch fixup to the current cleanup scope.
511  BranchFixup &addBranchFixup() {
512    assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
513    BranchFixups.push_back(BranchFixup());
514    return BranchFixups.back();
515  }
516
517  unsigned getNumBranchFixups() const { return BranchFixups.size(); }
518  BranchFixup &getBranchFixup(unsigned I) {
519    assert(I < getNumBranchFixups());
520    return BranchFixups[I];
521  }
522
523  /// Pops lazily-removed fixups from the end of the list.  This
524  /// should only be called by procedures which have just popped a
525  /// cleanup or resolved one or more fixups.
526  void popNullFixups();
527
528  /// Clears the branch-fixups list.  This should only be called by
529  /// ResolveAllBranchFixups.
530  void clearFixups() { BranchFixups.clear(); }
531};
532
533/// CodeGenFunction - This class organizes the per-function state that is used
534/// while generating LLVM code.
535class CodeGenFunction : public CodeGenTypeCache {
536  CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
537  void operator=(const CodeGenFunction&);  // DO NOT IMPLEMENT
538
539  friend class CGCXXABI;
540public:
541  /// A jump destination is an abstract label, branching to which may
542  /// require a jump out through normal cleanups.
543  struct JumpDest {
544    JumpDest() : Block(0), ScopeDepth(), Index(0) {}
545    JumpDest(llvm::BasicBlock *Block,
546             EHScopeStack::stable_iterator Depth,
547             unsigned Index)
548      : Block(Block), ScopeDepth(Depth), Index(Index) {}
549
550    bool isValid() const { return Block != 0; }
551    llvm::BasicBlock *getBlock() const { return Block; }
552    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
553    unsigned getDestIndex() const { return Index; }
554
555  private:
556    llvm::BasicBlock *Block;
557    EHScopeStack::stable_iterator ScopeDepth;
558    unsigned Index;
559  };
560
561  CodeGenModule &CGM;  // Per-module state.
562  const TargetInfo &Target;
563
564  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
565  CGBuilderTy Builder;
566
567  /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
568  /// This excludes BlockDecls.
569  const Decl *CurFuncDecl;
570  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
571  const Decl *CurCodeDecl;
572  const CGFunctionInfo *CurFnInfo;
573  QualType FnRetTy;
574  llvm::Function *CurFn;
575
576  /// CurGD - The GlobalDecl for the current function being compiled.
577  GlobalDecl CurGD;
578
579  /// PrologueCleanupDepth - The cleanup depth enclosing all the
580  /// cleanups associated with the parameters.
581  EHScopeStack::stable_iterator PrologueCleanupDepth;
582
583  /// ReturnBlock - Unified return block.
584  JumpDest ReturnBlock;
585
586  /// ReturnValue - The temporary alloca to hold the return value. This is null
587  /// iff the function has no return value.
588  llvm::Value *ReturnValue;
589
590  /// AllocaInsertPoint - This is an instruction in the entry block before which
591  /// we prefer to insert allocas.
592  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
593
594  bool CatchUndefined;
595
596  /// In ARC, whether we should autorelease the return value.
597  bool AutoreleaseResult;
598
599  const CodeGen::CGBlockInfo *BlockInfo;
600  llvm::Value *BlockPointer;
601
602  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
603  FieldDecl *LambdaThisCaptureField;
604
605  /// \brief A mapping from NRVO variables to the flags used to indicate
606  /// when the NRVO has been applied to this variable.
607  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
608
609  EHScopeStack EHStack;
610
611  /// i32s containing the indexes of the cleanup destinations.
612  llvm::AllocaInst *NormalCleanupDest;
613
614  unsigned NextCleanupDestIndex;
615
616  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
617  CGBlockInfo *FirstBlockInfo;
618
619  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
620  llvm::BasicBlock *EHResumeBlock;
621
622  /// The exception slot.  All landing pads write the current exception pointer
623  /// into this alloca.
624  llvm::Value *ExceptionSlot;
625
626  /// The selector slot.  Under the MandatoryCleanup model, all landing pads
627  /// write the current selector value into this alloca.
628  llvm::AllocaInst *EHSelectorSlot;
629
630  /// Emits a landing pad for the current EH stack.
631  llvm::BasicBlock *EmitLandingPad();
632
633  llvm::BasicBlock *getInvokeDestImpl();
634
635  template <class T>
636  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
637    return DominatingValue<T>::save(*this, value);
638  }
639
640public:
641  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
642  /// rethrows.
643  SmallVector<llvm::Value*, 8> ObjCEHValueStack;
644
645  /// A class controlling the emission of a finally block.
646  class FinallyInfo {
647    /// Where the catchall's edge through the cleanup should go.
648    JumpDest RethrowDest;
649
650    /// A function to call to enter the catch.
651    llvm::Constant *BeginCatchFn;
652
653    /// An i1 variable indicating whether or not the @finally is
654    /// running for an exception.
655    llvm::AllocaInst *ForEHVar;
656
657    /// An i8* variable into which the exception pointer to rethrow
658    /// has been saved.
659    llvm::AllocaInst *SavedExnVar;
660
661  public:
662    void enter(CodeGenFunction &CGF, const Stmt *Finally,
663               llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
664               llvm::Constant *rethrowFn);
665    void exit(CodeGenFunction &CGF);
666  };
667
668  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
669  /// current full-expression.  Safe against the possibility that
670  /// we're currently inside a conditionally-evaluated expression.
671  template <class T, class A0>
672  void pushFullExprCleanup(CleanupKind kind, A0 a0) {
673    // If we're not in a conditional branch, or if none of the
674    // arguments requires saving, then use the unconditional cleanup.
675    if (!isInConditionalBranch())
676      return EHStack.pushCleanup<T>(kind, a0);
677
678    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
679
680    typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
681    EHStack.pushCleanup<CleanupType>(kind, a0_saved);
682    initFullExprCleanup();
683  }
684
685  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
686  /// current full-expression.  Safe against the possibility that
687  /// we're currently inside a conditionally-evaluated expression.
688  template <class T, class A0, class A1>
689  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
690    // If we're not in a conditional branch, or if none of the
691    // arguments requires saving, then use the unconditional cleanup.
692    if (!isInConditionalBranch())
693      return EHStack.pushCleanup<T>(kind, a0, a1);
694
695    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
696    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
697
698    typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
699    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
700    initFullExprCleanup();
701  }
702
703  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
704  /// current full-expression.  Safe against the possibility that
705  /// we're currently inside a conditionally-evaluated expression.
706  template <class T, class A0, class A1, class A2>
707  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
708    // If we're not in a conditional branch, or if none of the
709    // arguments requires saving, then use the unconditional cleanup.
710    if (!isInConditionalBranch()) {
711      return EHStack.pushCleanup<T>(kind, a0, a1, a2);
712    }
713
714    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
715    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
716    typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
717
718    typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
719    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
720    initFullExprCleanup();
721  }
722
723  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
724  /// current full-expression.  Safe against the possibility that
725  /// we're currently inside a conditionally-evaluated expression.
726  template <class T, class A0, class A1, class A2, class A3>
727  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
728    // If we're not in a conditional branch, or if none of the
729    // arguments requires saving, then use the unconditional cleanup.
730    if (!isInConditionalBranch()) {
731      return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
732    }
733
734    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
735    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
736    typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
737    typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
738
739    typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
740    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
741                                     a2_saved, a3_saved);
742    initFullExprCleanup();
743  }
744
745  /// Set up the last cleaup that was pushed as a conditional
746  /// full-expression cleanup.
747  void initFullExprCleanup();
748
749  /// PushDestructorCleanup - Push a cleanup to call the
750  /// complete-object destructor of an object of the given type at the
751  /// given address.  Does nothing if T is not a C++ class type with a
752  /// non-trivial destructor.
753  void PushDestructorCleanup(QualType T, llvm::Value *Addr);
754
755  /// PushDestructorCleanup - Push a cleanup to call the
756  /// complete-object variant of the given destructor on the object at
757  /// the given address.
758  void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
759                             llvm::Value *Addr);
760
761  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
762  /// process all branch fixups.
763  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
764
765  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
766  /// The block cannot be reactivated.  Pops it if it's the top of the
767  /// stack.
768  ///
769  /// \param DominatingIP - An instruction which is known to
770  ///   dominate the current IP (if set) and which lies along
771  ///   all paths of execution between the current IP and the
772  ///   the point at which the cleanup comes into scope.
773  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
774                              llvm::Instruction *DominatingIP);
775
776  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
777  /// Cannot be used to resurrect a deactivated cleanup.
778  ///
779  /// \param DominatingIP - An instruction which is known to
780  ///   dominate the current IP (if set) and which lies along
781  ///   all paths of execution between the current IP and the
782  ///   the point at which the cleanup comes into scope.
783  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
784                            llvm::Instruction *DominatingIP);
785
786  /// \brief Enters a new scope for capturing cleanups, all of which
787  /// will be executed once the scope is exited.
788  class RunCleanupsScope {
789    EHScopeStack::stable_iterator CleanupStackDepth;
790    bool OldDidCallStackSave;
791    bool PerformCleanup;
792
793    RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
794    RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
795
796  protected:
797    CodeGenFunction& CGF;
798
799  public:
800    /// \brief Enter a new cleanup scope.
801    explicit RunCleanupsScope(CodeGenFunction &CGF)
802      : PerformCleanup(true), CGF(CGF)
803    {
804      CleanupStackDepth = CGF.EHStack.stable_begin();
805      OldDidCallStackSave = CGF.DidCallStackSave;
806      CGF.DidCallStackSave = false;
807    }
808
809    /// \brief Exit this cleanup scope, emitting any accumulated
810    /// cleanups.
811    ~RunCleanupsScope() {
812      if (PerformCleanup) {
813        CGF.DidCallStackSave = OldDidCallStackSave;
814        CGF.PopCleanupBlocks(CleanupStackDepth);
815      }
816    }
817
818    /// \brief Determine whether this scope requires any cleanups.
819    bool requiresCleanups() const {
820      return CGF.EHStack.stable_begin() != CleanupStackDepth;
821    }
822
823    /// \brief Force the emission of cleanups now, instead of waiting
824    /// until this object is destroyed.
825    void ForceCleanup() {
826      assert(PerformCleanup && "Already forced cleanup");
827      CGF.DidCallStackSave = OldDidCallStackSave;
828      CGF.PopCleanupBlocks(CleanupStackDepth);
829      PerformCleanup = false;
830    }
831  };
832
833  class LexicalScope: protected RunCleanupsScope {
834    SourceRange Range;
835    bool PopDebugStack;
836
837    LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
838    LexicalScope &operator=(const LexicalScope &);
839
840  public:
841    /// \brief Enter a new cleanup scope.
842    explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
843      : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
844      if (CGDebugInfo *DI = CGF.getDebugInfo())
845        DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
846    }
847
848    /// \brief Exit this cleanup scope, emitting any accumulated
849    /// cleanups.
850    ~LexicalScope() {
851      if (PopDebugStack) {
852        CGDebugInfo *DI = CGF.getDebugInfo();
853        if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
854      }
855    }
856
857    /// \brief Force the emission of cleanups now, instead of waiting
858    /// until this object is destroyed.
859    void ForceCleanup() {
860      RunCleanupsScope::ForceCleanup();
861      if (CGDebugInfo *DI = CGF.getDebugInfo()) {
862        DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
863        PopDebugStack = false;
864      }
865    }
866  };
867
868
869  /// PopCleanupBlocks - Takes the old cleanup stack size and emits
870  /// the cleanup blocks that have been added.
871  void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
872
873  void ResolveBranchFixups(llvm::BasicBlock *Target);
874
875  /// The given basic block lies in the current EH scope, but may be a
876  /// target of a potentially scope-crossing jump; get a stable handle
877  /// to which we can perform this jump later.
878  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
879    return JumpDest(Target,
880                    EHStack.getInnermostNormalCleanup(),
881                    NextCleanupDestIndex++);
882  }
883
884  /// The given basic block lies in the current EH scope, but may be a
885  /// target of a potentially scope-crossing jump; get a stable handle
886  /// to which we can perform this jump later.
887  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
888    return getJumpDestInCurrentScope(createBasicBlock(Name));
889  }
890
891  /// EmitBranchThroughCleanup - Emit a branch from the current insert
892  /// block through the normal cleanup handling code (if any) and then
893  /// on to \arg Dest.
894  void EmitBranchThroughCleanup(JumpDest Dest);
895
896  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
897  /// specified destination obviously has no cleanups to run.  'false' is always
898  /// a conservatively correct answer for this method.
899  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
900
901  /// popCatchScope - Pops the catch scope at the top of the EHScope
902  /// stack, emitting any required code (other than the catch handlers
903  /// themselves).
904  void popCatchScope();
905
906  llvm::BasicBlock *getEHResumeBlock();
907  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
908
909  /// An object to manage conditionally-evaluated expressions.
910  class ConditionalEvaluation {
911    llvm::BasicBlock *StartBB;
912
913  public:
914    ConditionalEvaluation(CodeGenFunction &CGF)
915      : StartBB(CGF.Builder.GetInsertBlock()) {}
916
917    void begin(CodeGenFunction &CGF) {
918      assert(CGF.OutermostConditional != this);
919      if (!CGF.OutermostConditional)
920        CGF.OutermostConditional = this;
921    }
922
923    void end(CodeGenFunction &CGF) {
924      assert(CGF.OutermostConditional != 0);
925      if (CGF.OutermostConditional == this)
926        CGF.OutermostConditional = 0;
927    }
928
929    /// Returns a block which will be executed prior to each
930    /// evaluation of the conditional code.
931    llvm::BasicBlock *getStartingBlock() const {
932      return StartBB;
933    }
934  };
935
936  /// isInConditionalBranch - Return true if we're currently emitting
937  /// one branch or the other of a conditional expression.
938  bool isInConditionalBranch() const { return OutermostConditional != 0; }
939
940  void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
941    assert(isInConditionalBranch());
942    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
943    new llvm::StoreInst(value, addr, &block->back());
944  }
945
946  /// An RAII object to record that we're evaluating a statement
947  /// expression.
948  class StmtExprEvaluation {
949    CodeGenFunction &CGF;
950
951    /// We have to save the outermost conditional: cleanups in a
952    /// statement expression aren't conditional just because the
953    /// StmtExpr is.
954    ConditionalEvaluation *SavedOutermostConditional;
955
956  public:
957    StmtExprEvaluation(CodeGenFunction &CGF)
958      : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
959      CGF.OutermostConditional = 0;
960    }
961
962    ~StmtExprEvaluation() {
963      CGF.OutermostConditional = SavedOutermostConditional;
964      CGF.EnsureInsertPoint();
965    }
966  };
967
968  /// An object which temporarily prevents a value from being
969  /// destroyed by aggressive peephole optimizations that assume that
970  /// all uses of a value have been realized in the IR.
971  class PeepholeProtection {
972    llvm::Instruction *Inst;
973    friend class CodeGenFunction;
974
975  public:
976    PeepholeProtection() : Inst(0) {}
977  };
978
979  /// A non-RAII class containing all the information about a bound
980  /// opaque value.  OpaqueValueMapping, below, is a RAII wrapper for
981  /// this which makes individual mappings very simple; using this
982  /// class directly is useful when you have a variable number of
983  /// opaque values or don't want the RAII functionality for some
984  /// reason.
985  class OpaqueValueMappingData {
986    const OpaqueValueExpr *OpaqueValue;
987    bool BoundLValue;
988    CodeGenFunction::PeepholeProtection Protection;
989
990    OpaqueValueMappingData(const OpaqueValueExpr *ov,
991                           bool boundLValue)
992      : OpaqueValue(ov), BoundLValue(boundLValue) {}
993  public:
994    OpaqueValueMappingData() : OpaqueValue(0) {}
995
996    static bool shouldBindAsLValue(const Expr *expr) {
997      // gl-values should be bound as l-values for obvious reasons.
998      // Records should be bound as l-values because IR generation
999      // always keeps them in memory.  Expressions of function type
1000      // act exactly like l-values but are formally required to be
1001      // r-values in C.
1002      return expr->isGLValue() ||
1003             expr->getType()->isRecordType() ||
1004             expr->getType()->isFunctionType();
1005    }
1006
1007    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1008                                       const OpaqueValueExpr *ov,
1009                                       const Expr *e) {
1010      if (shouldBindAsLValue(ov))
1011        return bind(CGF, ov, CGF.EmitLValue(e));
1012      return bind(CGF, ov, CGF.EmitAnyExpr(e));
1013    }
1014
1015    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1016                                       const OpaqueValueExpr *ov,
1017                                       const LValue &lv) {
1018      assert(shouldBindAsLValue(ov));
1019      CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1020      return OpaqueValueMappingData(ov, true);
1021    }
1022
1023    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1024                                       const OpaqueValueExpr *ov,
1025                                       const RValue &rv) {
1026      assert(!shouldBindAsLValue(ov));
1027      CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1028
1029      OpaqueValueMappingData data(ov, false);
1030
1031      // Work around an extremely aggressive peephole optimization in
1032      // EmitScalarConversion which assumes that all other uses of a
1033      // value are extant.
1034      data.Protection = CGF.protectFromPeepholes(rv);
1035
1036      return data;
1037    }
1038
1039    bool isValid() const { return OpaqueValue != 0; }
1040    void clear() { OpaqueValue = 0; }
1041
1042    void unbind(CodeGenFunction &CGF) {
1043      assert(OpaqueValue && "no data to unbind!");
1044
1045      if (BoundLValue) {
1046        CGF.OpaqueLValues.erase(OpaqueValue);
1047      } else {
1048        CGF.OpaqueRValues.erase(OpaqueValue);
1049        CGF.unprotectFromPeepholes(Protection);
1050      }
1051    }
1052  };
1053
1054  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1055  class OpaqueValueMapping {
1056    CodeGenFunction &CGF;
1057    OpaqueValueMappingData Data;
1058
1059  public:
1060    static bool shouldBindAsLValue(const Expr *expr) {
1061      return OpaqueValueMappingData::shouldBindAsLValue(expr);
1062    }
1063
1064    /// Build the opaque value mapping for the given conditional
1065    /// operator if it's the GNU ?: extension.  This is a common
1066    /// enough pattern that the convenience operator is really
1067    /// helpful.
1068    ///
1069    OpaqueValueMapping(CodeGenFunction &CGF,
1070                       const AbstractConditionalOperator *op) : CGF(CGF) {
1071      if (isa<ConditionalOperator>(op))
1072        // Leave Data empty.
1073        return;
1074
1075      const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1076      Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1077                                          e->getCommon());
1078    }
1079
1080    OpaqueValueMapping(CodeGenFunction &CGF,
1081                       const OpaqueValueExpr *opaqueValue,
1082                       LValue lvalue)
1083      : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1084    }
1085
1086    OpaqueValueMapping(CodeGenFunction &CGF,
1087                       const OpaqueValueExpr *opaqueValue,
1088                       RValue rvalue)
1089      : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1090    }
1091
1092    void pop() {
1093      Data.unbind(CGF);
1094      Data.clear();
1095    }
1096
1097    ~OpaqueValueMapping() {
1098      if (Data.isValid()) Data.unbind(CGF);
1099    }
1100  };
1101
1102  /// getByrefValueFieldNumber - Given a declaration, returns the LLVM field
1103  /// number that holds the value.
1104  unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
1105
1106  /// BuildBlockByrefAddress - Computes address location of the
1107  /// variable which is declared as __block.
1108  llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
1109                                      const VarDecl *V);
1110private:
1111  CGDebugInfo *DebugInfo;
1112  bool DisableDebugInfo;
1113
1114  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1115  /// calling llvm.stacksave for multiple VLAs in the same scope.
1116  bool DidCallStackSave;
1117
1118  /// IndirectBranch - The first time an indirect goto is seen we create a block
1119  /// with an indirect branch.  Every time we see the address of a label taken,
1120  /// we add the label to the indirect goto.  Every subsequent indirect goto is
1121  /// codegen'd as a jump to the IndirectBranch's basic block.
1122  llvm::IndirectBrInst *IndirectBranch;
1123
1124  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1125  /// decls.
1126  typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
1127  DeclMapTy LocalDeclMap;
1128
1129  /// LabelMap - This keeps track of the LLVM basic block for each C label.
1130  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1131
1132  // BreakContinueStack - This keeps track of where break and continue
1133  // statements should jump to.
1134  struct BreakContinue {
1135    BreakContinue(JumpDest Break, JumpDest Continue)
1136      : BreakBlock(Break), ContinueBlock(Continue) {}
1137
1138    JumpDest BreakBlock;
1139    JumpDest ContinueBlock;
1140  };
1141  SmallVector<BreakContinue, 8> BreakContinueStack;
1142
1143  /// SwitchInsn - This is nearest current switch instruction. It is null if
1144  /// current context is not in a switch.
1145  llvm::SwitchInst *SwitchInsn;
1146
1147  /// CaseRangeBlock - This block holds if condition check for last case
1148  /// statement range in current switch instruction.
1149  llvm::BasicBlock *CaseRangeBlock;
1150
1151  /// OpaqueLValues - Keeps track of the current set of opaque value
1152  /// expressions.
1153  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1154  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1155
1156  // VLASizeMap - This keeps track of the associated size for each VLA type.
1157  // We track this by the size expression rather than the type itself because
1158  // in certain situations, like a const qualifier applied to an VLA typedef,
1159  // multiple VLA types can share the same size expression.
1160  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1161  // enter/leave scopes.
1162  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1163
1164  /// A block containing a single 'unreachable' instruction.  Created
1165  /// lazily by getUnreachableBlock().
1166  llvm::BasicBlock *UnreachableBlock;
1167
1168  /// CXXThisDecl - When generating code for a C++ member function,
1169  /// this will hold the implicit 'this' declaration.
1170  ImplicitParamDecl *CXXABIThisDecl;
1171  llvm::Value *CXXABIThisValue;
1172  llvm::Value *CXXThisValue;
1173
1174  /// CXXVTTDecl - When generating code for a base object constructor or
1175  /// base object destructor with virtual bases, this will hold the implicit
1176  /// VTT parameter.
1177  ImplicitParamDecl *CXXVTTDecl;
1178  llvm::Value *CXXVTTValue;
1179
1180  /// OutermostConditional - Points to the outermost active
1181  /// conditional control.  This is used so that we know if a
1182  /// temporary should be destroyed conditionally.
1183  ConditionalEvaluation *OutermostConditional;
1184
1185
1186  /// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
1187  /// type as well as the field number that contains the actual data.
1188  llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
1189                                              unsigned> > ByRefValueInfo;
1190
1191  llvm::BasicBlock *TerminateLandingPad;
1192  llvm::BasicBlock *TerminateHandler;
1193  llvm::BasicBlock *TrapBB;
1194
1195public:
1196  CodeGenFunction(CodeGenModule &cgm);
1197  ~CodeGenFunction();
1198
1199  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1200  ASTContext &getContext() const { return CGM.getContext(); }
1201  CGDebugInfo *getDebugInfo() {
1202    if (DisableDebugInfo)
1203      return NULL;
1204    return DebugInfo;
1205  }
1206  void disableDebugInfo() { DisableDebugInfo = true; }
1207  void enableDebugInfo() { DisableDebugInfo = false; }
1208
1209  bool shouldUseFusedARCCalls() {
1210    return CGM.getCodeGenOpts().OptimizationLevel == 0;
1211  }
1212
1213  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1214
1215  /// Returns a pointer to the function's exception object and selector slot,
1216  /// which is assigned in every landing pad.
1217  llvm::Value *getExceptionSlot();
1218  llvm::Value *getEHSelectorSlot();
1219
1220  /// Returns the contents of the function's exception object and selector
1221  /// slots.
1222  llvm::Value *getExceptionFromSlot();
1223  llvm::Value *getSelectorFromSlot();
1224
1225  llvm::Value *getNormalCleanupDestSlot();
1226
1227  llvm::BasicBlock *getUnreachableBlock() {
1228    if (!UnreachableBlock) {
1229      UnreachableBlock = createBasicBlock("unreachable");
1230      new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1231    }
1232    return UnreachableBlock;
1233  }
1234
1235  llvm::BasicBlock *getInvokeDest() {
1236    if (!EHStack.requiresLandingPad()) return 0;
1237    return getInvokeDestImpl();
1238  }
1239
1240  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1241
1242  //===--------------------------------------------------------------------===//
1243  //                                  Cleanups
1244  //===--------------------------------------------------------------------===//
1245
1246  typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
1247
1248  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1249                                        llvm::Value *arrayEndPointer,
1250                                        QualType elementType,
1251                                        Destroyer *destroyer);
1252  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1253                                      llvm::Value *arrayEnd,
1254                                      QualType elementType,
1255                                      Destroyer *destroyer);
1256
1257  void pushDestroy(QualType::DestructionKind dtorKind,
1258                   llvm::Value *addr, QualType type);
1259  void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
1260                   Destroyer *destroyer, bool useEHCleanupForArray);
1261  void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
1262                   bool useEHCleanupForArray);
1263  llvm::Function *generateDestroyHelper(llvm::Constant *addr,
1264                                        QualType type,
1265                                        Destroyer *destroyer,
1266                                        bool useEHCleanupForArray);
1267  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1268                        QualType type, Destroyer *destroyer,
1269                        bool checkZeroLength, bool useEHCleanup);
1270
1271  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1272
1273  /// Determines whether an EH cleanup is required to destroy a type
1274  /// with the given destruction kind.
1275  bool needsEHCleanup(QualType::DestructionKind kind) {
1276    switch (kind) {
1277    case QualType::DK_none:
1278      return false;
1279    case QualType::DK_cxx_destructor:
1280    case QualType::DK_objc_weak_lifetime:
1281      return getLangOpts().Exceptions;
1282    case QualType::DK_objc_strong_lifetime:
1283      return getLangOpts().Exceptions &&
1284             CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1285    }
1286    llvm_unreachable("bad destruction kind");
1287  }
1288
1289  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1290    return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1291  }
1292
1293  //===--------------------------------------------------------------------===//
1294  //                                  Objective-C
1295  //===--------------------------------------------------------------------===//
1296
1297  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1298
1299  void StartObjCMethod(const ObjCMethodDecl *MD,
1300                       const ObjCContainerDecl *CD,
1301                       SourceLocation StartLoc);
1302
1303  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1304  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1305                          const ObjCPropertyImplDecl *PID);
1306  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1307                              const ObjCPropertyImplDecl *propImpl,
1308                              llvm::Constant *AtomicHelperFn);
1309
1310  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1311                                  ObjCMethodDecl *MD, bool ctor);
1312
1313  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1314  /// for the given property.
1315  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1316                          const ObjCPropertyImplDecl *PID);
1317  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1318                              const ObjCPropertyImplDecl *propImpl,
1319                              llvm::Constant *AtomicHelperFn);
1320  bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1321  bool IvarTypeWithAggrGCObjects(QualType Ty);
1322
1323  //===--------------------------------------------------------------------===//
1324  //                                  Block Bits
1325  //===--------------------------------------------------------------------===//
1326
1327  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1328  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
1329  static void destroyBlockInfos(CGBlockInfo *info);
1330  llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1331                                           const CGBlockInfo &Info,
1332                                           llvm::StructType *,
1333                                           llvm::Constant *BlockVarLayout);
1334
1335  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1336                                        const CGBlockInfo &Info,
1337                                        const Decl *OuterFuncDecl,
1338                                        const DeclMapTy &ldm,
1339                                        bool IsLambdaConversionToBlock);
1340
1341  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1342  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1343  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1344                                             const ObjCPropertyImplDecl *PID);
1345  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1346                                             const ObjCPropertyImplDecl *PID);
1347  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1348
1349  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1350
1351  class AutoVarEmission;
1352
1353  void emitByrefStructureInit(const AutoVarEmission &emission);
1354  void enterByrefCleanup(const AutoVarEmission &emission);
1355
1356  llvm::Value *LoadBlockStruct() {
1357    assert(BlockPointer && "no block pointer set!");
1358    return BlockPointer;
1359  }
1360
1361  void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1362  void AllocateBlockDecl(const DeclRefExpr *E);
1363  llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1364  llvm::Type *BuildByRefType(const VarDecl *var);
1365
1366  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1367                    const CGFunctionInfo &FnInfo);
1368  void StartFunction(GlobalDecl GD, QualType RetTy,
1369                     llvm::Function *Fn,
1370                     const CGFunctionInfo &FnInfo,
1371                     const FunctionArgList &Args,
1372                     SourceLocation StartLoc);
1373
1374  void EmitConstructorBody(FunctionArgList &Args);
1375  void EmitDestructorBody(FunctionArgList &Args);
1376  void EmitFunctionBody(FunctionArgList &Args);
1377
1378  void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
1379                                  CallArgList &CallArgs);
1380  void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
1381  void EmitLambdaBlockInvokeBody();
1382  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1383  void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
1384
1385  /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1386  /// emission when possible.
1387  void EmitReturnBlock();
1388
1389  /// FinishFunction - Complete IR generation of the current function. It is
1390  /// legal to call this function even if there is no current insertion point.
1391  void FinishFunction(SourceLocation EndLoc=SourceLocation());
1392
1393  /// GenerateThunk - Generate a thunk for the given method.
1394  void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1395                     GlobalDecl GD, const ThunkInfo &Thunk);
1396
1397  void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1398                            GlobalDecl GD, const ThunkInfo &Thunk);
1399
1400  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1401                        FunctionArgList &Args);
1402
1403  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
1404                               ArrayRef<VarDecl *> ArrayIndexes);
1405
1406  /// InitializeVTablePointer - Initialize the vtable pointer of the given
1407  /// subobject.
1408  ///
1409  void InitializeVTablePointer(BaseSubobject Base,
1410                               const CXXRecordDecl *NearestVBase,
1411                               CharUnits OffsetFromNearestVBase,
1412                               llvm::Constant *VTable,
1413                               const CXXRecordDecl *VTableClass);
1414
1415  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1416  void InitializeVTablePointers(BaseSubobject Base,
1417                                const CXXRecordDecl *NearestVBase,
1418                                CharUnits OffsetFromNearestVBase,
1419                                bool BaseIsNonVirtualPrimaryBase,
1420                                llvm::Constant *VTable,
1421                                const CXXRecordDecl *VTableClass,
1422                                VisitedVirtualBasesSetTy& VBases);
1423
1424  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1425
1426  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1427  /// to by This.
1428  llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
1429
1430  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1431  /// given phase of destruction for a destructor.  The end result
1432  /// should call destructors on members and base classes in reverse
1433  /// order of their construction.
1434  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1435
1436  /// ShouldInstrumentFunction - Return true if the current function should be
1437  /// instrumented with __cyg_profile_func_* calls
1438  bool ShouldInstrumentFunction();
1439
1440  /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1441  /// instrumentation function with the current function and the call site, if
1442  /// function instrumentation is enabled.
1443  void EmitFunctionInstrumentation(const char *Fn);
1444
1445  /// EmitMCountInstrumentation - Emit call to .mcount.
1446  void EmitMCountInstrumentation();
1447
1448  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1449  /// arguments for the given function. This is also responsible for naming the
1450  /// LLVM function arguments.
1451  void EmitFunctionProlog(const CGFunctionInfo &FI,
1452                          llvm::Function *Fn,
1453                          const FunctionArgList &Args);
1454
1455  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1456  /// given temporary.
1457  void EmitFunctionEpilog(const CGFunctionInfo &FI);
1458
1459  /// EmitStartEHSpec - Emit the start of the exception spec.
1460  void EmitStartEHSpec(const Decl *D);
1461
1462  /// EmitEndEHSpec - Emit the end of the exception spec.
1463  void EmitEndEHSpec(const Decl *D);
1464
1465  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1466  llvm::BasicBlock *getTerminateLandingPad();
1467
1468  /// getTerminateHandler - Return a handler (not a landing pad, just
1469  /// a catch handler) that just calls terminate.  This is used when
1470  /// a terminate scope encloses a try.
1471  llvm::BasicBlock *getTerminateHandler();
1472
1473  llvm::Type *ConvertTypeForMem(QualType T);
1474  llvm::Type *ConvertType(QualType T);
1475  llvm::Type *ConvertType(const TypeDecl *T) {
1476    return ConvertType(getContext().getTypeDeclType(T));
1477  }
1478
1479  /// LoadObjCSelf - Load the value of self. This function is only valid while
1480  /// generating code for an Objective-C method.
1481  llvm::Value *LoadObjCSelf();
1482
1483  /// TypeOfSelfObject - Return type of object that this self represents.
1484  QualType TypeOfSelfObject();
1485
1486  /// hasAggregateLLVMType - Return true if the specified AST type will map into
1487  /// an aggregate LLVM type or is void.
1488  static bool hasAggregateLLVMType(QualType T);
1489
1490  /// createBasicBlock - Create an LLVM basic block.
1491  llvm::BasicBlock *createBasicBlock(StringRef name = "",
1492                                     llvm::Function *parent = 0,
1493                                     llvm::BasicBlock *before = 0) {
1494#ifdef NDEBUG
1495    return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1496#else
1497    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1498#endif
1499  }
1500
1501  /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
1502  /// label maps to.
1503  JumpDest getJumpDestForLabel(const LabelDecl *S);
1504
1505  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1506  /// another basic block, simplify it. This assumes that no other code could
1507  /// potentially reference the basic block.
1508  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1509
1510  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1511  /// adding a fall-through branch from the current insert block if
1512  /// necessary. It is legal to call this function even if there is no current
1513  /// insertion point.
1514  ///
1515  /// IsFinished - If true, indicates that the caller has finished emitting
1516  /// branches to the given block and does not expect to emit code into it. This
1517  /// means the block can be ignored if it is unreachable.
1518  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1519
1520  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
1521  /// near its uses, and leave the insertion point in it.
1522  void EmitBlockAfterUses(llvm::BasicBlock *BB);
1523
1524  /// EmitBranch - Emit a branch to the specified basic block from the current
1525  /// insert block, taking care to avoid creation of branches from dummy
1526  /// blocks. It is legal to call this function even if there is no current
1527  /// insertion point.
1528  ///
1529  /// This function clears the current insertion point. The caller should follow
1530  /// calls to this function with calls to Emit*Block prior to generation new
1531  /// code.
1532  void EmitBranch(llvm::BasicBlock *Block);
1533
1534  /// HaveInsertPoint - True if an insertion point is defined. If not, this
1535  /// indicates that the current code being emitted is unreachable.
1536  bool HaveInsertPoint() const {
1537    return Builder.GetInsertBlock() != 0;
1538  }
1539
1540  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1541  /// emitted IR has a place to go. Note that by definition, if this function
1542  /// creates a block then that block is unreachable; callers may do better to
1543  /// detect when no insertion point is defined and simply skip IR generation.
1544  void EnsureInsertPoint() {
1545    if (!HaveInsertPoint())
1546      EmitBlock(createBasicBlock());
1547  }
1548
1549  /// ErrorUnsupported - Print out an error that codegen doesn't support the
1550  /// specified stmt yet.
1551  void ErrorUnsupported(const Stmt *S, const char *Type,
1552                        bool OmitOnError=false);
1553
1554  //===--------------------------------------------------------------------===//
1555  //                                  Helpers
1556  //===--------------------------------------------------------------------===//
1557
1558  LValue MakeAddrLValue(llvm::Value *V, QualType T,
1559                        CharUnits Alignment = CharUnits()) {
1560    return LValue::MakeAddr(V, T, Alignment, getContext(),
1561                            CGM.getTBAAInfo(T));
1562  }
1563  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
1564    CharUnits Alignment;
1565    if (!T->isIncompleteType())
1566      Alignment = getContext().getTypeAlignInChars(T);
1567    return LValue::MakeAddr(V, T, Alignment, getContext(),
1568                            CGM.getTBAAInfo(T));
1569  }
1570
1571  /// CreateTempAlloca - This creates a alloca and inserts it into the entry
1572  /// block. The caller is responsible for setting an appropriate alignment on
1573  /// the alloca.
1574  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
1575                                     const Twine &Name = "tmp");
1576
1577  /// InitTempAlloca - Provide an initial value for the given alloca.
1578  void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1579
1580  /// CreateIRTemp - Create a temporary IR object of the given type, with
1581  /// appropriate alignment. This routine should only be used when an temporary
1582  /// value needs to be stored into an alloca (for example, to avoid explicit
1583  /// PHI construction), but the type is the IR type, not the type appropriate
1584  /// for storing in memory.
1585  llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
1586
1587  /// CreateMemTemp - Create a temporary memory object of the given type, with
1588  /// appropriate alignment.
1589  llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
1590
1591  /// CreateAggTemp - Create a temporary memory object for the given
1592  /// aggregate type.
1593  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
1594    CharUnits Alignment = getContext().getTypeAlignInChars(T);
1595    return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
1596                                 T.getQualifiers(),
1597                                 AggValueSlot::IsNotDestructed,
1598                                 AggValueSlot::DoesNotNeedGCBarriers,
1599                                 AggValueSlot::IsNotAliased);
1600  }
1601
1602  /// Emit a cast to void* in the appropriate address space.
1603  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1604
1605  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1606  /// expression and compare the result against zero, returning an Int1Ty value.
1607  llvm::Value *EvaluateExprAsBool(const Expr *E);
1608
1609  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1610  void EmitIgnoredExpr(const Expr *E);
1611
1612  /// EmitAnyExpr - Emit code to compute the specified expression which can have
1613  /// any type.  The result is returned as an RValue struct.  If this is an
1614  /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
1615  /// the result should be returned.
1616  ///
1617  /// \param IgnoreResult - True if the resulting value isn't used.
1618  RValue EmitAnyExpr(const Expr *E,
1619                     AggValueSlot AggSlot = AggValueSlot::ignored(),
1620                     bool IgnoreResult = false);
1621
1622  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1623  // or the value of the expression, depending on how va_list is defined.
1624  llvm::Value *EmitVAListRef(const Expr *E);
1625
1626  /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
1627  /// always be accessible even if no aggregate location is provided.
1628  RValue EmitAnyExprToTemp(const Expr *E);
1629
1630  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
1631  /// arbitrary expression into the given memory location.
1632  void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1633                        Qualifiers Quals, bool IsInitializer);
1634
1635  /// EmitExprAsInit - Emits the code necessary to initialize a
1636  /// location in memory with the given initializer.
1637  void EmitExprAsInit(const Expr *init, const ValueDecl *D,
1638                      LValue lvalue, bool capturedByInit);
1639
1640  /// EmitAggregateCopy - Emit an aggrate copy.
1641  ///
1642  /// \param isVolatile - True iff either the source or the destination is
1643  /// volatile.
1644  void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1645                         QualType EltTy, bool isVolatile=false,
1646                         unsigned Alignment = 0);
1647
1648  /// StartBlock - Start new block named N. If insert block is a dummy block
1649  /// then reuse it.
1650  void StartBlock(const char *N);
1651
1652  /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
1653  llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
1654    return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
1655  }
1656
1657  /// GetAddrOfLocalVar - Return the address of a local variable.
1658  llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1659    llvm::Value *Res = LocalDeclMap[VD];
1660    assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1661    return Res;
1662  }
1663
1664  /// getOpaqueLValueMapping - Given an opaque value expression (which
1665  /// must be mapped to an l-value), return its mapping.
1666  const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1667    assert(OpaqueValueMapping::shouldBindAsLValue(e));
1668
1669    llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1670      it = OpaqueLValues.find(e);
1671    assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1672    return it->second;
1673  }
1674
1675  /// getOpaqueRValueMapping - Given an opaque value expression (which
1676  /// must be mapped to an r-value), return its mapping.
1677  const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1678    assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1679
1680    llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1681      it = OpaqueRValues.find(e);
1682    assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1683    return it->second;
1684  }
1685
1686  /// getAccessedFieldNo - Given an encoded value and a result number, return
1687  /// the input field number being accessed.
1688  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1689
1690  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1691  llvm::BasicBlock *GetIndirectGotoBlock();
1692
1693  /// EmitNullInitialization - Generate code to set a value of the given type to
1694  /// null, If the type contains data member pointers, they will be initialized
1695  /// to -1 in accordance with the Itanium C++ ABI.
1696  void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1697
1698  // EmitVAArg - Generate code to get an argument from the passed in pointer
1699  // and update it accordingly. The return value is a pointer to the argument.
1700  // FIXME: We should be able to get rid of this method and use the va_arg
1701  // instruction in LLVM instead once it works well enough.
1702  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1703
1704  /// emitArrayLength - Compute the length of an array, even if it's a
1705  /// VLA, and drill down to the base element type.
1706  llvm::Value *emitArrayLength(const ArrayType *arrayType,
1707                               QualType &baseType,
1708                               llvm::Value *&addr);
1709
1710  /// EmitVLASize - Capture all the sizes for the VLA expressions in
1711  /// the given variably-modified type and store them in the VLASizeMap.
1712  ///
1713  /// This function can be called with a null (unreachable) insert point.
1714  void EmitVariablyModifiedType(QualType Ty);
1715
1716  /// getVLASize - Returns an LLVM value that corresponds to the size,
1717  /// in non-variably-sized elements, of a variable length array type,
1718  /// plus that largest non-variably-sized element type.  Assumes that
1719  /// the type has already been emitted with EmitVariablyModifiedType.
1720  std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
1721  std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
1722
1723  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
1724  /// generating code for an C++ member function.
1725  llvm::Value *LoadCXXThis() {
1726    assert(CXXThisValue && "no 'this' value for this function");
1727    return CXXThisValue;
1728  }
1729
1730  /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
1731  /// virtual bases.
1732  llvm::Value *LoadCXXVTT() {
1733    assert(CXXVTTValue && "no VTT value for this function");
1734    return CXXVTTValue;
1735  }
1736
1737  /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
1738  /// complete class to the given direct base.
1739  llvm::Value *
1740  GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1741                                        const CXXRecordDecl *Derived,
1742                                        const CXXRecordDecl *Base,
1743                                        bool BaseIsVirtual);
1744
1745  /// GetAddressOfBaseClass - This function will add the necessary delta to the
1746  /// load of 'this' and returns address of the base class.
1747  llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1748                                     const CXXRecordDecl *Derived,
1749                                     CastExpr::path_const_iterator PathBegin,
1750                                     CastExpr::path_const_iterator PathEnd,
1751                                     bool NullCheckValue);
1752
1753  llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1754                                        const CXXRecordDecl *Derived,
1755                                        CastExpr::path_const_iterator PathBegin,
1756                                        CastExpr::path_const_iterator PathEnd,
1757                                        bool NullCheckValue);
1758
1759  llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
1760                                         const CXXRecordDecl *ClassDecl,
1761                                         const CXXRecordDecl *BaseClassDecl);
1762
1763  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1764                                      CXXCtorType CtorType,
1765                                      const FunctionArgList &Args);
1766  // It's important not to confuse this and the previous function. Delegating
1767  // constructors are the C++0x feature. The constructor delegate optimization
1768  // is used to reduce duplication in the base and complete consturctors where
1769  // they are substantially the same.
1770  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1771                                        const FunctionArgList &Args);
1772  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1773                              bool ForVirtualBase, llvm::Value *This,
1774                              CallExpr::const_arg_iterator ArgBeg,
1775                              CallExpr::const_arg_iterator ArgEnd);
1776
1777  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1778                              llvm::Value *This, llvm::Value *Src,
1779                              CallExpr::const_arg_iterator ArgBeg,
1780                              CallExpr::const_arg_iterator ArgEnd);
1781
1782  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1783                                  const ConstantArrayType *ArrayTy,
1784                                  llvm::Value *ArrayPtr,
1785                                  CallExpr::const_arg_iterator ArgBeg,
1786                                  CallExpr::const_arg_iterator ArgEnd,
1787                                  bool ZeroInitialization = false);
1788
1789  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1790                                  llvm::Value *NumElements,
1791                                  llvm::Value *ArrayPtr,
1792                                  CallExpr::const_arg_iterator ArgBeg,
1793                                  CallExpr::const_arg_iterator ArgEnd,
1794                                  bool ZeroInitialization = false);
1795
1796  static Destroyer destroyCXXObject;
1797
1798  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1799                             bool ForVirtualBase, llvm::Value *This);
1800
1801  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
1802                               llvm::Value *NewPtr, llvm::Value *NumElements);
1803
1804  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
1805                        llvm::Value *Ptr);
1806
1807  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1808  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1809
1810  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1811                      QualType DeleteTy);
1812
1813  llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1814  llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1815
1816  void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
1817  void EmitStdInitializerListCleanup(llvm::Value *loc,
1818                                     const InitListExpr *init);
1819
1820  void EmitCheck(llvm::Value *, unsigned Size);
1821
1822  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1823                                       bool isInc, bool isPre);
1824  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1825                                         bool isInc, bool isPre);
1826  //===--------------------------------------------------------------------===//
1827  //                            Declaration Emission
1828  //===--------------------------------------------------------------------===//
1829
1830  /// EmitDecl - Emit a declaration.
1831  ///
1832  /// This function can be called with a null (unreachable) insert point.
1833  void EmitDecl(const Decl &D);
1834
1835  /// EmitVarDecl - Emit a local variable declaration.
1836  ///
1837  /// This function can be called with a null (unreachable) insert point.
1838  void EmitVarDecl(const VarDecl &D);
1839
1840  void EmitScalarInit(const Expr *init, const ValueDecl *D,
1841                      LValue lvalue, bool capturedByInit);
1842  void EmitScalarInit(llvm::Value *init, LValue lvalue);
1843
1844  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1845                             llvm::Value *Address);
1846
1847  /// EmitAutoVarDecl - Emit an auto variable declaration.
1848  ///
1849  /// This function can be called with a null (unreachable) insert point.
1850  void EmitAutoVarDecl(const VarDecl &D);
1851
1852  class AutoVarEmission {
1853    friend class CodeGenFunction;
1854
1855    const VarDecl *Variable;
1856
1857    /// The alignment of the variable.
1858    CharUnits Alignment;
1859
1860    /// The address of the alloca.  Null if the variable was emitted
1861    /// as a global constant.
1862    llvm::Value *Address;
1863
1864    llvm::Value *NRVOFlag;
1865
1866    /// True if the variable is a __block variable.
1867    bool IsByRef;
1868
1869    /// True if the variable is of aggregate type and has a constant
1870    /// initializer.
1871    bool IsConstantAggregate;
1872
1873    struct Invalid {};
1874    AutoVarEmission(Invalid) : Variable(0) {}
1875
1876    AutoVarEmission(const VarDecl &variable)
1877      : Variable(&variable), Address(0), NRVOFlag(0),
1878        IsByRef(false), IsConstantAggregate(false) {}
1879
1880    bool wasEmittedAsGlobal() const { return Address == 0; }
1881
1882  public:
1883    static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
1884
1885    /// Returns the address of the object within this declaration.
1886    /// Note that this does not chase the forwarding pointer for
1887    /// __block decls.
1888    llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
1889      if (!IsByRef) return Address;
1890
1891      return CGF.Builder.CreateStructGEP(Address,
1892                                         CGF.getByRefValueLLVMField(Variable),
1893                                         Variable->getNameAsString());
1894    }
1895  };
1896  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
1897  void EmitAutoVarInit(const AutoVarEmission &emission);
1898  void EmitAutoVarCleanups(const AutoVarEmission &emission);
1899  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1900                              QualType::DestructionKind dtorKind);
1901
1902  void EmitStaticVarDecl(const VarDecl &D,
1903                         llvm::GlobalValue::LinkageTypes Linkage);
1904
1905  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1906  void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
1907
1908  /// protectFromPeepholes - Protect a value that we're intending to
1909  /// store to the side, but which will probably be used later, from
1910  /// aggressive peepholing optimizations that might delete it.
1911  ///
1912  /// Pass the result to unprotectFromPeepholes to declare that
1913  /// protection is no longer required.
1914  ///
1915  /// There's no particular reason why this shouldn't apply to
1916  /// l-values, it's just that no existing peepholes work on pointers.
1917  PeepholeProtection protectFromPeepholes(RValue rvalue);
1918  void unprotectFromPeepholes(PeepholeProtection protection);
1919
1920  //===--------------------------------------------------------------------===//
1921  //                             Statement Emission
1922  //===--------------------------------------------------------------------===//
1923
1924  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1925  void EmitStopPoint(const Stmt *S);
1926
1927  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1928  /// this function even if there is no current insertion point.
1929  ///
1930  /// This function may clear the current insertion point; callers should use
1931  /// EnsureInsertPoint if they wish to subsequently generate code without first
1932  /// calling EmitBlock, EmitBranch, or EmitStmt.
1933  void EmitStmt(const Stmt *S);
1934
1935  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
1936  /// necessarily require an insertion point or debug information; typically
1937  /// because the statement amounts to a jump or a container of other
1938  /// statements.
1939  ///
1940  /// \return True if the statement was handled.
1941  bool EmitSimpleStmt(const Stmt *S);
1942
1943  RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
1944                          AggValueSlot AVS = AggValueSlot::ignored());
1945
1946  /// EmitLabel - Emit the block for the given label. It is legal to call this
1947  /// function even if there is no current insertion point.
1948  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
1949
1950  void EmitLabelStmt(const LabelStmt &S);
1951  void EmitAttributedStmt(const AttributedStmt &S);
1952  void EmitGotoStmt(const GotoStmt &S);
1953  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
1954  void EmitIfStmt(const IfStmt &S);
1955  void EmitWhileStmt(const WhileStmt &S);
1956  void EmitDoStmt(const DoStmt &S);
1957  void EmitForStmt(const ForStmt &S);
1958  void EmitReturnStmt(const ReturnStmt &S);
1959  void EmitDeclStmt(const DeclStmt &S);
1960  void EmitBreakStmt(const BreakStmt &S);
1961  void EmitContinueStmt(const ContinueStmt &S);
1962  void EmitSwitchStmt(const SwitchStmt &S);
1963  void EmitDefaultStmt(const DefaultStmt &S);
1964  void EmitCaseStmt(const CaseStmt &S);
1965  void EmitCaseStmtRange(const CaseStmt &S);
1966  void EmitAsmStmt(const AsmStmt &S);
1967
1968  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
1969  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
1970  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
1971  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
1972  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
1973
1974  llvm::Constant *getUnwindResumeFn();
1975  llvm::Constant *getUnwindResumeOrRethrowFn();
1976  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1977  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1978
1979  void EmitCXXTryStmt(const CXXTryStmt &S);
1980  void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
1981
1982  //===--------------------------------------------------------------------===//
1983  //                         LValue Expression Emission
1984  //===--------------------------------------------------------------------===//
1985
1986  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
1987  RValue GetUndefRValue(QualType Ty);
1988
1989  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
1990  /// and issue an ErrorUnsupported style diagnostic (using the
1991  /// provided Name).
1992  RValue EmitUnsupportedRValue(const Expr *E,
1993                               const char *Name);
1994
1995  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
1996  /// an ErrorUnsupported style diagnostic (using the provided Name).
1997  LValue EmitUnsupportedLValue(const Expr *E,
1998                               const char *Name);
1999
2000  /// EmitLValue - Emit code to compute a designator that specifies the location
2001  /// of the expression.
2002  ///
2003  /// This can return one of two things: a simple address or a bitfield
2004  /// reference.  In either case, the LLVM Value* in the LValue structure is
2005  /// guaranteed to be an LLVM pointer type.
2006  ///
2007  /// If this returns a bitfield reference, nothing about the pointee type of
2008  /// the LLVM value is known: For example, it may not be a pointer to an
2009  /// integer.
2010  ///
2011  /// If this returns a normal address, and if the lvalue's C type is fixed
2012  /// size, this method guarantees that the returned pointer type will point to
2013  /// an LLVM type of the same size of the lvalue's type.  If the lvalue has a
2014  /// variable length type, this is not possible.
2015  ///
2016  LValue EmitLValue(const Expr *E);
2017
2018  /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
2019  /// checking code to guard against undefined behavior.  This is only
2020  /// suitable when we know that the address will be used to access the
2021  /// object.
2022  LValue EmitCheckedLValue(const Expr *E);
2023
2024  /// EmitToMemory - Change a scalar value from its value
2025  /// representation to its in-memory representation.
2026  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
2027
2028  /// EmitFromMemory - Change a scalar value from its memory
2029  /// representation to its value representation.
2030  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
2031
2032  /// EmitLoadOfScalar - Load a scalar value from an address, taking
2033  /// care to appropriately convert from the memory representation to
2034  /// the LLVM value representation.
2035  llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
2036                                unsigned Alignment, QualType Ty,
2037                                llvm::MDNode *TBAAInfo = 0);
2038
2039  /// EmitLoadOfScalar - Load a scalar value from an address, taking
2040  /// care to appropriately convert from the memory representation to
2041  /// the LLVM value representation.  The l-value must be a simple
2042  /// l-value.
2043  llvm::Value *EmitLoadOfScalar(LValue lvalue);
2044
2045  /// EmitStoreOfScalar - Store a scalar value to an address, taking
2046  /// care to appropriately convert from the memory representation to
2047  /// the LLVM value representation.
2048  void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
2049                         bool Volatile, unsigned Alignment, QualType Ty,
2050                         llvm::MDNode *TBAAInfo = 0, bool isInit=false);
2051
2052  /// EmitStoreOfScalar - Store a scalar value to an address, taking
2053  /// care to appropriately convert from the memory representation to
2054  /// the LLVM value representation.  The l-value must be a simple
2055  /// l-value.  The isInit flag indicates whether this is an initialization.
2056  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
2057  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
2058
2059  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
2060  /// this method emits the address of the lvalue, then loads the result as an
2061  /// rvalue, returning the rvalue.
2062  RValue EmitLoadOfLValue(LValue V);
2063  RValue EmitLoadOfExtVectorElementLValue(LValue V);
2064  RValue EmitLoadOfBitfieldLValue(LValue LV);
2065
2066  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2067  /// lvalue, where both are guaranteed to the have the same type, and that type
2068  /// is 'Ty'.
2069  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
2070  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
2071
2072  /// EmitStoreThroughLValue - Store Src into Dst with same constraints as
2073  /// EmitStoreThroughLValue.
2074  ///
2075  /// \param Result [out] - If non-null, this will be set to a Value* for the
2076  /// bit-field contents after the store, appropriate for use as the result of
2077  /// an assignment to the bit-field.
2078  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2079                                      llvm::Value **Result=0);
2080
2081  /// Emit an l-value for an assignment (simple or compound) of complex type.
2082  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
2083  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
2084
2085  // Note: only available for agg return types
2086  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
2087  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
2088  // Note: only available for agg return types
2089  LValue EmitCallExprLValue(const CallExpr *E);
2090  // Note: only available for agg return types
2091  LValue EmitVAArgExprLValue(const VAArgExpr *E);
2092  LValue EmitDeclRefLValue(const DeclRefExpr *E);
2093  LValue EmitStringLiteralLValue(const StringLiteral *E);
2094  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
2095  LValue EmitPredefinedLValue(const PredefinedExpr *E);
2096  LValue EmitUnaryOpLValue(const UnaryOperator *E);
2097  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
2098  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
2099  LValue EmitMemberExpr(const MemberExpr *E);
2100  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
2101  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
2102  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
2103  LValue EmitCastLValue(const CastExpr *E);
2104  LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
2105  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
2106  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
2107
2108  RValue EmitRValueForField(LValue LV, const FieldDecl *FD);
2109
2110  class ConstantEmission {
2111    llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
2112    ConstantEmission(llvm::Constant *C, bool isReference)
2113      : ValueAndIsReference(C, isReference) {}
2114  public:
2115    ConstantEmission() {}
2116    static ConstantEmission forReference(llvm::Constant *C) {
2117      return ConstantEmission(C, true);
2118    }
2119    static ConstantEmission forValue(llvm::Constant *C) {
2120      return ConstantEmission(C, false);
2121    }
2122
2123    operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
2124
2125    bool isReference() const { return ValueAndIsReference.getInt(); }
2126    LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
2127      assert(isReference());
2128      return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
2129                                            refExpr->getType());
2130    }
2131
2132    llvm::Constant *getValue() const {
2133      assert(!isReference());
2134      return ValueAndIsReference.getPointer();
2135    }
2136  };
2137
2138  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
2139
2140  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
2141                                AggValueSlot slot = AggValueSlot::ignored());
2142  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
2143
2144  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2145                              const ObjCIvarDecl *Ivar);
2146  LValue EmitLValueForAnonRecordField(llvm::Value* Base,
2147                                      const IndirectFieldDecl* Field,
2148                                      unsigned CVRQualifiers);
2149  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
2150
2151  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
2152  /// if the Field is a reference, this will return the address of the reference
2153  /// and not the address of the value stored in the reference.
2154  LValue EmitLValueForFieldInitialization(LValue Base,
2155                                          const FieldDecl* Field);
2156
2157  LValue EmitLValueForIvar(QualType ObjectTy,
2158                           llvm::Value* Base, const ObjCIvarDecl *Ivar,
2159                           unsigned CVRQualifiers);
2160
2161  LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
2162                                unsigned CVRQualifiers);
2163
2164  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
2165  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
2166  LValue EmitLambdaLValue(const LambdaExpr *E);
2167  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
2168
2169  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
2170  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
2171  LValue EmitStmtExprLValue(const StmtExpr *E);
2172  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
2173  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
2174  void   EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
2175
2176  //===--------------------------------------------------------------------===//
2177  //                         Scalar Expression Emission
2178  //===--------------------------------------------------------------------===//
2179
2180  /// EmitCall - Generate a call of the given function, expecting the given
2181  /// result type, and using the given argument list which specifies both the
2182  /// LLVM arguments and the types they were derived from.
2183  ///
2184  /// \param TargetDecl - If given, the decl of the function in a direct call;
2185  /// used to set attributes on the call (noreturn, etc.).
2186  RValue EmitCall(const CGFunctionInfo &FnInfo,
2187                  llvm::Value *Callee,
2188                  ReturnValueSlot ReturnValue,
2189                  const CallArgList &Args,
2190                  const Decl *TargetDecl = 0,
2191                  llvm::Instruction **callOrInvoke = 0);
2192
2193  RValue EmitCall(QualType FnType, llvm::Value *Callee,
2194                  ReturnValueSlot ReturnValue,
2195                  CallExpr::const_arg_iterator ArgBeg,
2196                  CallExpr::const_arg_iterator ArgEnd,
2197                  const Decl *TargetDecl = 0);
2198  RValue EmitCallExpr(const CallExpr *E,
2199                      ReturnValueSlot ReturnValue = ReturnValueSlot());
2200
2201  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2202                                  ArrayRef<llvm::Value *> Args,
2203                                  const Twine &Name = "");
2204  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2205                                  const Twine &Name = "");
2206
2207  llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
2208                                llvm::Type *Ty);
2209  llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
2210                                llvm::Value *This, llvm::Type *Ty);
2211  llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
2212                                         NestedNameSpecifier *Qual,
2213                                         llvm::Type *Ty);
2214
2215  llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
2216                                                   CXXDtorType Type,
2217                                                   const CXXRecordDecl *RD);
2218
2219  RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
2220                           llvm::Value *Callee,
2221                           ReturnValueSlot ReturnValue,
2222                           llvm::Value *This,
2223                           llvm::Value *VTT,
2224                           CallExpr::const_arg_iterator ArgBeg,
2225                           CallExpr::const_arg_iterator ArgEnd);
2226  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
2227                               ReturnValueSlot ReturnValue);
2228  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
2229                                      ReturnValueSlot ReturnValue);
2230
2231  llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2232                                           const CXXMethodDecl *MD,
2233                                           llvm::Value *This);
2234  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
2235                                       const CXXMethodDecl *MD,
2236                                       ReturnValueSlot ReturnValue);
2237
2238  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
2239                                ReturnValueSlot ReturnValue);
2240
2241
2242  RValue EmitBuiltinExpr(const FunctionDecl *FD,
2243                         unsigned BuiltinID, const CallExpr *E);
2244
2245  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
2246
2247  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
2248  /// is unhandled by the current target.
2249  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2250
2251  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2252  llvm::Value *EmitNeonCall(llvm::Function *F,
2253                            SmallVectorImpl<llvm::Value*> &O,
2254                            const char *name,
2255                            unsigned shift = 0, bool rightshift = false);
2256  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
2257  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
2258                                   bool negateForRightShift);
2259
2260  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
2261  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2262  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2263  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2264
2265  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
2266  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
2267  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
2268  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
2269  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
2270  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
2271                                const ObjCMethodDecl *MethodWithObjects);
2272  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
2273  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
2274                             ReturnValueSlot Return = ReturnValueSlot());
2275
2276  /// Retrieves the default cleanup kind for an ARC cleanup.
2277  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
2278  CleanupKind getARCCleanupKind() {
2279    return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
2280             ? NormalAndEHCleanup : NormalCleanup;
2281  }
2282
2283  // ARC primitives.
2284  void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
2285  void EmitARCDestroyWeak(llvm::Value *addr);
2286  llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
2287  llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
2288  llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
2289                                bool ignored);
2290  void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
2291  void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
2292  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
2293  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
2294  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
2295                                  bool ignored);
2296  llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
2297                                      bool ignored);
2298  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
2299  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
2300  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
2301  void EmitARCRelease(llvm::Value *value, bool precise);
2302  llvm::Value *EmitARCAutorelease(llvm::Value *value);
2303  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
2304  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
2305  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
2306
2307  std::pair<LValue,llvm::Value*>
2308  EmitARCStoreAutoreleasing(const BinaryOperator *e);
2309  std::pair<LValue,llvm::Value*>
2310  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
2311
2312  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
2313
2314  llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
2315  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
2316  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
2317
2318  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
2319  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
2320  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
2321
2322  static Destroyer destroyARCStrongImprecise;
2323  static Destroyer destroyARCStrongPrecise;
2324  static Destroyer destroyARCWeak;
2325
2326  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
2327  llvm::Value *EmitObjCAutoreleasePoolPush();
2328  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
2329  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
2330  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
2331
2332  /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
2333  /// expression. Will emit a temporary variable if E is not an LValue.
2334  RValue EmitReferenceBindingToExpr(const Expr* E,
2335                                    const NamedDecl *InitializedDecl);
2336
2337  //===--------------------------------------------------------------------===//
2338  //                           Expression Emission
2339  //===--------------------------------------------------------------------===//
2340
2341  // Expressions are broken into three classes: scalar, complex, aggregate.
2342
2343  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
2344  /// scalar type, returning the result.
2345  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
2346
2347  /// EmitScalarConversion - Emit a conversion from the specified type to the
2348  /// specified destination type, both of which are LLVM scalar types.
2349  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
2350                                    QualType DstTy);
2351
2352  /// EmitComplexToScalarConversion - Emit a conversion from the specified
2353  /// complex type to the specified destination type, where the destination type
2354  /// is an LLVM scalar type.
2355  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
2356                                             QualType DstTy);
2357
2358
2359  /// EmitAggExpr - Emit the computation of the specified expression
2360  /// of aggregate type.  The result is computed into the given slot,
2361  /// which may be null to indicate that the value is not needed.
2362  void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
2363
2364  /// EmitAggExprToLValue - Emit the computation of the specified expression of
2365  /// aggregate type into a temporary LValue.
2366  LValue EmitAggExprToLValue(const Expr *E);
2367
2368  /// EmitGCMemmoveCollectable - Emit special API for structs with object
2369  /// pointers.
2370  void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
2371                                QualType Ty);
2372
2373  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2374  /// make sure it survives garbage collection until this point.
2375  void EmitExtendGCLifetime(llvm::Value *object);
2376
2377  /// EmitComplexExpr - Emit the computation of the specified expression of
2378  /// complex type, returning the result.
2379  ComplexPairTy EmitComplexExpr(const Expr *E,
2380                                bool IgnoreReal = false,
2381                                bool IgnoreImag = false);
2382
2383  /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
2384  /// of complex type, storing into the specified Value*.
2385  void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
2386                               bool DestIsVolatile);
2387
2388  /// StoreComplexToAddr - Store a complex number into the specified address.
2389  void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
2390                          bool DestIsVolatile);
2391  /// LoadComplexFromAddr - Load a complex number from the specified address.
2392  ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
2393
2394  /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
2395  /// a static local variable.
2396  llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
2397                                            const char *Separator,
2398                                       llvm::GlobalValue::LinkageTypes Linkage);
2399
2400  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
2401  /// global variable that has already been created for it.  If the initializer
2402  /// has a different type than GV does, this may free GV and return a different
2403  /// one.  Otherwise it just returns GV.
2404  llvm::GlobalVariable *
2405  AddInitializerToStaticVarDecl(const VarDecl &D,
2406                                llvm::GlobalVariable *GV);
2407
2408
2409  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
2410  /// variable with global storage.
2411  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
2412                                bool PerformInit);
2413
2414  /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
2415  /// with the C++ runtime so that its destructor will be called at exit.
2416  void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
2417                                     llvm::Constant *DeclPtr);
2418
2419  /// Emit code in this function to perform a guarded variable
2420  /// initialization.  Guarded initializations are used when it's not
2421  /// possible to prove that an initialization will be done exactly
2422  /// once, e.g. with a static local variable or a static data member
2423  /// of a class template.
2424  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
2425                          bool PerformInit);
2426
2427  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
2428  /// variables.
2429  void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
2430                                 llvm::Constant **Decls,
2431                                 unsigned NumDecls);
2432
2433  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
2434  /// variables.
2435  void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
2436                                  const std::vector<std::pair<llvm::WeakVH,
2437                                  llvm::Constant*> > &DtorsAndObjects);
2438
2439  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
2440                                        const VarDecl *D,
2441                                        llvm::GlobalVariable *Addr,
2442                                        bool PerformInit);
2443
2444  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
2445
2446  void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
2447                                  const Expr *Exp);
2448
2449  void enterFullExpression(const ExprWithCleanups *E) {
2450    if (E->getNumObjects() == 0) return;
2451    enterNonTrivialFullExpression(E);
2452  }
2453  void enterNonTrivialFullExpression(const ExprWithCleanups *E);
2454
2455  void EmitCXXThrowExpr(const CXXThrowExpr *E);
2456
2457  void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
2458
2459  RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
2460
2461  //===--------------------------------------------------------------------===//
2462  //                         Annotations Emission
2463  //===--------------------------------------------------------------------===//
2464
2465  /// Emit an annotation call (intrinsic or builtin).
2466  llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
2467                                  llvm::Value *AnnotatedVal,
2468                                  llvm::StringRef AnnotationStr,
2469                                  SourceLocation Location);
2470
2471  /// Emit local annotations for the local variable V, declared by D.
2472  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
2473
2474  /// Emit field annotations for the given field & value. Returns the
2475  /// annotation result.
2476  llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
2477
2478  //===--------------------------------------------------------------------===//
2479  //                             Internal Helpers
2480  //===--------------------------------------------------------------------===//
2481
2482  /// ContainsLabel - Return true if the statement contains a label in it.  If
2483  /// this statement is not executed normally, it not containing a label means
2484  /// that we can just remove the code.
2485  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
2486
2487  /// containsBreak - Return true if the statement contains a break out of it.
2488  /// If the statement (recursively) contains a switch or loop with a break
2489  /// inside of it, this is fine.
2490  static bool containsBreak(const Stmt *S);
2491
2492  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2493  /// to a constant, or if it does but contains a label, return false.  If it
2494  /// constant folds return true and set the boolean result in Result.
2495  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
2496
2497  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2498  /// to a constant, or if it does but contains a label, return false.  If it
2499  /// constant folds return true and set the folded value.
2500  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
2501
2502  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
2503  /// if statement) to the specified blocks.  Based on the condition, this might
2504  /// try to simplify the codegen of the conditional based on the branch.
2505  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
2506                            llvm::BasicBlock *FalseBlock);
2507
2508  /// getTrapBB - Create a basic block that will call the trap intrinsic.  We'll
2509  /// generate a branch around the created basic block as necessary.
2510  llvm::BasicBlock *getTrapBB();
2511
2512  /// EmitCallArg - Emit a single call argument.
2513  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
2514
2515  /// EmitDelegateCallArg - We are performing a delegate call; that
2516  /// is, the current function is delegating to another one.  Produce
2517  /// a r-value suitable for passing the given parameter.
2518  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
2519
2520  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
2521  /// point operation, expressed as the maximum relative error in ulp.
2522  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
2523
2524private:
2525  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
2526  void EmitReturnOfRValue(RValue RV, QualType Ty);
2527
2528  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
2529  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
2530  ///
2531  /// \param AI - The first function argument of the expansion.
2532  /// \return The argument following the last expanded function
2533  /// argument.
2534  llvm::Function::arg_iterator
2535  ExpandTypeFromArgs(QualType Ty, LValue Dst,
2536                     llvm::Function::arg_iterator AI);
2537
2538  /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
2539  /// Ty, into individual arguments on the provided vector \arg Args. See
2540  /// ABIArgInfo::Expand.
2541  void ExpandTypeToArgs(QualType Ty, RValue Src,
2542                        SmallVector<llvm::Value*, 16> &Args,
2543                        llvm::FunctionType *IRFuncTy);
2544
2545  llvm::Value* EmitAsmInput(const AsmStmt &S,
2546                            const TargetInfo::ConstraintInfo &Info,
2547                            const Expr *InputExpr, std::string &ConstraintStr);
2548
2549  llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
2550                                  const TargetInfo::ConstraintInfo &Info,
2551                                  LValue InputValue, QualType InputType,
2552                                  std::string &ConstraintStr);
2553
2554  /// EmitCallArgs - Emit call arguments for a function.
2555  /// The CallArgTypeInfo parameter is used for iterating over the known
2556  /// argument types of the function being called.
2557  template<typename T>
2558  void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
2559                    CallExpr::const_arg_iterator ArgBeg,
2560                    CallExpr::const_arg_iterator ArgEnd) {
2561      CallExpr::const_arg_iterator Arg = ArgBeg;
2562
2563    // First, use the argument types that the type info knows about
2564    if (CallArgTypeInfo) {
2565      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
2566           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
2567        assert(Arg != ArgEnd && "Running over edge of argument list!");
2568        QualType ArgType = *I;
2569#ifndef NDEBUG
2570        QualType ActualArgType = Arg->getType();
2571        if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
2572          QualType ActualBaseType =
2573            ActualArgType->getAs<PointerType>()->getPointeeType();
2574          QualType ArgBaseType =
2575            ArgType->getAs<PointerType>()->getPointeeType();
2576          if (ArgBaseType->isVariableArrayType()) {
2577            if (const VariableArrayType *VAT =
2578                getContext().getAsVariableArrayType(ActualBaseType)) {
2579              if (!VAT->getSizeExpr())
2580                ActualArgType = ArgType;
2581            }
2582          }
2583        }
2584        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
2585               getTypePtr() ==
2586               getContext().getCanonicalType(ActualArgType).getTypePtr() &&
2587               "type mismatch in call argument!");
2588#endif
2589        EmitCallArg(Args, *Arg, ArgType);
2590      }
2591
2592      // Either we've emitted all the call args, or we have a call to a
2593      // variadic function.
2594      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
2595             "Extra arguments in non-variadic function!");
2596
2597    }
2598
2599    // If we still have any arguments, emit them using the type of the argument.
2600    for (; Arg != ArgEnd; ++Arg)
2601      EmitCallArg(Args, *Arg, Arg->getType());
2602  }
2603
2604  const TargetCodeGenInfo &getTargetHooks() const {
2605    return CGM.getTargetCodeGenInfo();
2606  }
2607
2608  void EmitDeclMetadata();
2609
2610  CodeGenModule::ByrefHelpers *
2611  buildByrefHelpers(llvm::StructType &byrefType,
2612                    const AutoVarEmission &emission);
2613
2614  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
2615
2616  /// GetPointeeAlignment - Given an expression with a pointer type, find the
2617  /// alignment of the type referenced by the pointer.  Skip over implicit
2618  /// casts.
2619  unsigned GetPointeeAlignment(const Expr *Addr);
2620
2621  /// GetPointeeAlignmentValue - Given an expression with a pointer type, find
2622  /// the alignment of the type referenced by the pointer.  Skip over implicit
2623  /// casts.  Return the alignment as an llvm::Value.
2624  llvm::Value *GetPointeeAlignmentValue(const Expr *Addr);
2625};
2626
2627/// Helper class with most of the code for saving a value for a
2628/// conditional expression cleanup.
2629struct DominatingLLVMValue {
2630  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
2631
2632  /// Answer whether the given value needs extra work to be saved.
2633  static bool needsSaving(llvm::Value *value) {
2634    // If it's not an instruction, we don't need to save.
2635    if (!isa<llvm::Instruction>(value)) return false;
2636
2637    // If it's an instruction in the entry block, we don't need to save.
2638    llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
2639    return (block != &block->getParent()->getEntryBlock());
2640  }
2641
2642  /// Try to save the given value.
2643  static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
2644    if (!needsSaving(value)) return saved_type(value, false);
2645
2646    // Otherwise we need an alloca.
2647    llvm::Value *alloca =
2648      CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
2649    CGF.Builder.CreateStore(value, alloca);
2650
2651    return saved_type(alloca, true);
2652  }
2653
2654  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
2655    if (!value.getInt()) return value.getPointer();
2656    return CGF.Builder.CreateLoad(value.getPointer());
2657  }
2658};
2659
2660/// A partial specialization of DominatingValue for llvm::Values that
2661/// might be llvm::Instructions.
2662template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
2663  typedef T *type;
2664  static type restore(CodeGenFunction &CGF, saved_type value) {
2665    return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
2666  }
2667};
2668
2669/// A specialization of DominatingValue for RValue.
2670template <> struct DominatingValue<RValue> {
2671  typedef RValue type;
2672  class saved_type {
2673    enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
2674                AggregateAddress, ComplexAddress };
2675
2676    llvm::Value *Value;
2677    Kind K;
2678    saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
2679
2680  public:
2681    static bool needsSaving(RValue value);
2682    static saved_type save(CodeGenFunction &CGF, RValue value);
2683    RValue restore(CodeGenFunction &CGF);
2684
2685    // implementations in CGExprCXX.cpp
2686  };
2687
2688  static bool needsSaving(type value) {
2689    return saved_type::needsSaving(value);
2690  }
2691  static saved_type save(CodeGenFunction &CGF, type value) {
2692    return saved_type::save(CGF, value);
2693  }
2694  static type restore(CodeGenFunction &CGF, saved_type value) {
2695    return value.restore(CGF);
2696  }
2697};
2698
2699}  // end namespace CodeGen
2700}  // end namespace clang
2701
2702#endif
2703