CodeGenFunction.h revision 55fc873017f10f6f566b182b70f6fc22aefa3464
//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the internal per-function state used for llvm translation.
//
//===----------------------------------------------------------------------===//

#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
#define CLANG_CODEGEN_CODEGENFUNCTION_H

#include "CGBuilder.h"
#include "CGDebugInfo.h"
#include "CGValue.h"
#include "CodeGenModule.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"

namespace llvm {
  class BasicBlock;
  class LLVMContext;
  class MDNode;
  class Module;
  class SwitchInst;
  class Twine;
  class Value;
  class CallSite;
}

namespace clang {
  class ASTContext;
  class BlockDecl;
  class CXXDestructorDecl;
  class CXXForRangeStmt;
  class CXXTryStmt;
  class Decl;
  class LabelDecl;
  class EnumConstantDecl;
  class FunctionDecl;
  class FunctionProtoType;
  class LabelStmt;
  class ObjCContainerDecl;
  class ObjCInterfaceDecl;
  class ObjCIvarDecl;
  class ObjCMethodDecl;
  class ObjCImplementationDecl;
  class ObjCPropertyImplDecl;
  class TargetInfo;
  class TargetCodeGenInfo;
  class VarDecl;
  class ObjCForCollectionStmt;
  class ObjCAtTryStmt;
  class ObjCAtThrowStmt;
  class ObjCAtSynchronizedStmt;
  class ObjCAutoreleasePoolStmt;

namespace CodeGen {
  class CodeGenTypes;
  class CGFunctionInfo;
  class CGRecordLayout;
  class CGBlockInfo;
  class CGCXXABI;
  class BlockFlags;
  class BlockFieldFlags;

/// A branch fixup.  These are required when emitting a goto to a
/// label which hasn't been emitted yet.  The goto is optimistically
/// emitted as a branch to the basic block for the label, and (if it
/// occurs in a scope with non-trivial cleanups) a fixup is added to
/// the innermost cleanup.  When a (normal) cleanup is popped, any
/// unresolved fixups in that scope are threaded through the cleanup.
struct BranchFixup {
  /// The block containing the terminator which needs to be modified
  /// into a switch if this fixup is resolved into the current scope.
  /// If null, InitialBranch points directly to the destination.
  llvm::BasicBlock *OptimisticBranchBlock;

  /// The ultimate destination of the branch.
  ///
  /// This can be set to null to indicate that this fixup was
  /// successfully resolved.
  llvm::BasicBlock *Destination;

  /// The destination index value.
  unsigned DestinationIndex;

  /// The initial branch of the fixup.
  llvm::BranchInst *InitialBranch;
};

template <class T> struct InvariantValue {
  typedef T type;
  typedef T saved_type;
  static bool needsSaving(type value) { return false; }
  static saved_type save(CodeGenFunction &CGF, type value) { return value; }
  static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
};

/// A metaprogramming class for ensuring that a value will dominate an
/// arbitrary position in a function.
template <class T> struct DominatingValue : InvariantValue<T> {};

template <class T, bool mightBeInstruction =
            llvm::is_base_of<llvm::Value, T>::value &&
            !llvm::is_base_of<llvm::Constant, T>::value &&
            !llvm::is_base_of<llvm::BasicBlock, T>::value>
struct DominatingPointer;
template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
// template <class T> struct DominatingPointer<T,true> at end of file

template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
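
// Illustrative sketch (not part of the original header): the interface a
// DominatingValue specialization is expected to provide when values of the
// type may need to be spilled so that they dominate later uses.  "MyHandle"
// and "SavedMyHandle" are hypothetical names; real specializations (e.g. for
// RValue and for instruction pointers) live elsewhere in CodeGen.
//
//   template <> struct DominatingValue<MyHandle> {
//     typedef MyHandle type;
//     typedef SavedMyHandle saved_type;   // e.g. an address to reload from
//     static bool needsSaving(type value);
//     static saved_type save(CodeGenFunction &CGF, type value);
//     static type restore(CodeGenFunction &CGF, saved_type value);
//   };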

enum CleanupKind {
  EHCleanup = 0x1,
  NormalCleanup = 0x2,
  NormalAndEHCleanup = EHCleanup | NormalCleanup,

  InactiveCleanup = 0x4,
  InactiveEHCleanup = EHCleanup | InactiveCleanup,
  InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
  InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
};

/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
class EHScopeStack {
public:
  /// A saved depth on the scope stack.  This is necessary because
  /// pushing scopes onto the stack invalidates iterators.
  class stable_iterator {
    friend class EHScopeStack;

    /// Offset from StartOfData to EndOfBuffer.
    ptrdiff_t Size;

    stable_iterator(ptrdiff_t Size) : Size(Size) {}

  public:
    static stable_iterator invalid() { return stable_iterator(-1); }
    stable_iterator() : Size(-1) {}

    bool isValid() const { return Size >= 0; }

    /// Returns true if this scope encloses I.
    /// Returns false if I is invalid.
    /// This scope must be valid.
    bool encloses(stable_iterator I) const { return Size <= I.Size; }

    /// Returns true if this scope strictly encloses I: that is,
    /// if it encloses I and is not I.
    /// Returns false if I is invalid.
    /// This scope must be valid.
    bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }

    friend bool operator==(stable_iterator A, stable_iterator B) {
      return A.Size == B.Size;
    }
    friend bool operator!=(stable_iterator A, stable_iterator B) {
      return A.Size != B.Size;
    }
  };

  /// Information for lazily generating a cleanup.  Subclasses must be
  /// POD-like: cleanups will not be destructed, and they will be
  /// allocated on the cleanup stack and freely copied and moved
  /// around.
  ///
  /// Cleanup implementations should generally be declared in an
  /// anonymous namespace.
  class Cleanup {
    // Anchor the construction vtable.
    virtual void anchor();
  public:
    /// Generation flags.
    class Flags {
      enum {
        F_IsForEH             = 0x1,
        F_IsNormalCleanupKind = 0x2,
        F_IsEHCleanupKind     = 0x4
      };
      unsigned flags;

    public:
      Flags() : flags(0) {}

      /// isForEHCleanup - true if the current emission is for an EH cleanup.
      bool isForEHCleanup() const { return flags & F_IsForEH; }
      bool isForNormalCleanup() const { return !isForEHCleanup(); }
      void setIsForEHCleanup() { flags |= F_IsForEH; }

      bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
      void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }

      /// isEHCleanupKind - true if the cleanup was pushed as an EH
      /// cleanup.
      bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
      void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
    };

    // Provide a virtual destructor to suppress a very common warning
    // that unfortunately cannot be suppressed without this.  Cleanups
    // should not rely on this destructor ever being called.
    virtual ~Cleanup() {}

    /// Emit the cleanup.  For normal cleanups, this is run in the
    /// same EH context as when the cleanup was pushed, i.e. the
    /// immediately-enclosing context of the cleanup scope.  For
    /// EH cleanups, this is run in a terminate context.
    ///
    /// \param flags cleanup kind.
    virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
  };
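
  // Illustrative sketch (not part of the original header): a minimal Cleanup
  // subclass of the kind described above.  "CallFreeCleanup" is hypothetical;
  // real cleanups are defined in anonymous namespaces in the CG*.cpp files.
  //
  //   namespace {
  //     struct CallFreeCleanup : EHScopeStack::Cleanup {
  //       llvm::Value *Ptr;
  //       CallFreeCleanup(llvm::Value *Ptr) : Ptr(Ptr) {}
  //       void Emit(CodeGenFunction &CGF, Flags flags) {
  //         // emit the IR that releases the resource, e.g. a call to free()
  //       }
  //     };
  //   }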

  /// ConditionalCleanupN stores the saved form of its N parameters,
  /// then restores them and performs the cleanup.
  template <class T, class A0>
  class ConditionalCleanup1 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    A0_saved a0_saved;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      T(a0).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup1(A0_saved a0)
      : a0_saved(a0) {}
  };

  template <class T, class A0, class A1>
  class ConditionalCleanup2 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    typedef typename DominatingValue<A1>::saved_type A1_saved;
    A0_saved a0_saved;
    A1_saved a1_saved;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
      T(a0, a1).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup2(A0_saved a0, A1_saved a1)
      : a0_saved(a0), a1_saved(a1) {}
  };

  template <class T, class A0, class A1, class A2>
  class ConditionalCleanup3 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    typedef typename DominatingValue<A1>::saved_type A1_saved;
    typedef typename DominatingValue<A2>::saved_type A2_saved;
    A0_saved a0_saved;
    A1_saved a1_saved;
    A2_saved a2_saved;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
      T(a0, a1, a2).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
      : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
  };

  template <class T, class A0, class A1, class A2, class A3>
  class ConditionalCleanup4 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    typedef typename DominatingValue<A1>::saved_type A1_saved;
    typedef typename DominatingValue<A2>::saved_type A2_saved;
    typedef typename DominatingValue<A3>::saved_type A3_saved;
    A0_saved a0_saved;
    A1_saved a1_saved;
    A2_saved a2_saved;
    A3_saved a3_saved;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
      A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
      T(a0, a1, a2, a3).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
      : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
  };

private:
  // The implementation for this class is in CGCleanup.h and
  // CGCleanup.cpp; the definition is here because it's used as a
  // member of CodeGenFunction.

  /// The start of the scope-stack buffer, i.e. the allocated pointer
  /// for the buffer.  All of these pointers are either simultaneously
  /// null or simultaneously valid.
  char *StartOfBuffer;

  /// The end of the buffer.
  char *EndOfBuffer;

  /// The first valid entry in the buffer.
  char *StartOfData;

  /// The innermost normal cleanup on the stack.
  stable_iterator InnermostNormalCleanup;

  /// The innermost EH scope on the stack.
  stable_iterator InnermostEHScope;

  /// The current set of branch fixups.  A branch fixup is a jump to
  /// an as-yet unemitted label, i.e. a label for which we don't yet
  /// know the EH stack depth.  Whenever we pop a cleanup, we have
  /// to thread all the current branch fixups through it.
  ///
  /// Fixups are recorded as the Use of the respective branch or
  /// switch statement.  The use points to the final destination.
  /// When popping out of a cleanup, these uses are threaded through
  /// the cleanup and adjusted to point to the new cleanup.
  ///
  /// Note that branches are allowed to jump into protected scopes
  /// in certain situations;  e.g. the following code is legal:
  ///     struct A { ~A(); }; // trivial ctor, non-trivial dtor
  ///     goto foo;
  ///     A a;
  ///    foo:
  ///     bar();
  SmallVector<BranchFixup, 8> BranchFixups;

  char *allocate(size_t Size);

  void *pushCleanup(CleanupKind K, size_t DataSize);

public:
  EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
                   InnermostNormalCleanup(stable_end()),
                   InnermostEHScope(stable_end()) {}
  ~EHScopeStack() { delete[] StartOfBuffer; }

  // Variadic templates would make this not terrible.

  /// Push a lazily-created cleanup on the stack.
  template <class T>
  void pushCleanup(CleanupKind Kind) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T();
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0>
  void pushCleanup(CleanupKind Kind, A0 a0) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2, class A3>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2, class A3, class A4>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
    (void) Obj;
  }

  // Feel free to add more variants of the following:

  /// Push a cleanup with non-constant storage requirements on the
  /// stack.  The cleanup type must provide an additional static method:
  ///   static size_t getExtraSize(size_t);
  /// The argument to this method will be the value N, which will also
  /// be passed as the first argument to the constructor.
  ///
  /// The data stored in the extra storage must obey the same
  /// restrictions as normal cleanup member data.
  ///
  /// The pointer returned from this method is valid until the cleanup
  /// stack is modified.
  template <class T, class A0, class A1, class A2>
  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
    return new (Buffer) T(N, a0, a1, a2);
  }
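
  // Illustrative usage sketch (not part of the original header), continuing
  // the hypothetical CallFreeCleanup above: the arguments after the
  // CleanupKind are forwarded to the cleanup's constructor, and the object is
  // placement-constructed directly in the stack's buffer.
  //
  //   CGF.EHStack.pushCleanup<CallFreeCleanup>(NormalAndEHCleanup, ptr);
  //
  // A cleanup with trailing variable-sized data would additionally define
  //   static size_t getExtraSize(size_t);
  // and be pushed with pushCleanupWithExtra, as documented above.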

  /// Pops a cleanup scope off the stack.  This is private to CGCleanup.cpp.
  void popCleanup();

  /// Push a set of catch handlers on the stack.  The catch is
  /// uninitialized and will need to have the given number of handlers
  /// set on it.
  class EHCatchScope *pushCatch(unsigned NumHandlers);

  /// Pops a catch scope off the stack.  This is private to CGException.cpp.
  void popCatch();

  /// Push an exceptions filter on the stack.
  class EHFilterScope *pushFilter(unsigned NumFilters);

  /// Pops an exceptions filter off the stack.
  void popFilter();

  /// Push a terminate handler on the stack.
  void pushTerminate();

  /// Pops a terminate handler off the stack.
  void popTerminate();

  /// Determines whether the exception-scopes stack is empty.
  bool empty() const { return StartOfData == EndOfBuffer; }

  bool requiresLandingPad() const {
    return InnermostEHScope != stable_end();
  }

  /// Determines whether there are any normal cleanups on the stack.
  bool hasNormalCleanups() const {
    return InnermostNormalCleanup != stable_end();
  }

  /// Returns the innermost normal cleanup on the stack, or
  /// stable_end() if there are no normal cleanups.
  stable_iterator getInnermostNormalCleanup() const {
    return InnermostNormalCleanup;
  }
  stable_iterator getInnermostActiveNormalCleanup() const;

  stable_iterator getInnermostEHScope() const {
    return InnermostEHScope;
  }

  stable_iterator getInnermostActiveEHScope() const;

  /// An unstable reference to a scope-stack depth.  Invalidated by
  /// pushes but not pops.
  class iterator;

  /// Returns an iterator pointing to the innermost EH scope.
  iterator begin() const;

  /// Returns an iterator pointing to the outermost EH scope.
  iterator end() const;

  /// Create a stable reference to the top of the EH stack.  The
  /// returned reference is valid until that scope is popped off the
  /// stack.
  stable_iterator stable_begin() const {
    return stable_iterator(EndOfBuffer - StartOfData);
  }

  /// Create a stable reference to the bottom of the EH stack.
  static stable_iterator stable_end() {
    return stable_iterator(0);
  }

  /// Translates an iterator into a stable_iterator.
  stable_iterator stabilize(iterator it) const;

  /// Turn a stable reference to a scope depth into an unstable pointer
  /// to the EH stack.
  iterator find(stable_iterator save) const;

  /// Removes the cleanup pointed to by the given stable_iterator.
  void removeCleanup(stable_iterator save);

  /// Add a branch fixup to the current cleanup scope.
  BranchFixup &addBranchFixup() {
    assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
    BranchFixups.push_back(BranchFixup());
    return BranchFixups.back();
  }

  unsigned getNumBranchFixups() const { return BranchFixups.size(); }
  BranchFixup &getBranchFixup(unsigned I) {
    assert(I < getNumBranchFixups());
    return BranchFixups[I];
  }

  /// Pops lazily-removed fixups from the end of the list.  This
  /// should only be called by procedures which have just popped a
  /// cleanup or resolved one or more fixups.
  void popNullFixups();

  /// Clears the branch-fixups list.  This should only be called by
  /// ResolveAllBranchFixups.
  void clearFixups() { BranchFixups.clear(); }
};
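
// Illustrative usage sketch (not part of the original header): callers
// typically record the current depth before emitting a region and later pop
// everything pushed since then (PopCleanupBlocks is a CodeGenFunction member
// declared below).
//
//   EHScopeStack::stable_iterator Depth = CGF.EHStack.stable_begin();
//   // ... emit code that may push cleanups ...
//   CGF.PopCleanupBlocks(Depth);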

/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
  CodeGenFunction(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
  void operator=(const CodeGenFunction &) LLVM_DELETED_FUNCTION;

  friend class CGCXXABI;
public:
  /// A jump destination is an abstract label, branching to which may
  /// require a jump out through normal cleanups.
  struct JumpDest {
    JumpDest() : Block(0), ScopeDepth(), Index(0) {}
    JumpDest(llvm::BasicBlock *Block,
             EHScopeStack::stable_iterator Depth,
             unsigned Index)
      : Block(Block), ScopeDepth(Depth), Index(Index) {}

    bool isValid() const { return Block != 0; }
    llvm::BasicBlock *getBlock() const { return Block; }
    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
    unsigned getDestIndex() const { return Index; }

  private:
    llvm::BasicBlock *Block;
    EHScopeStack::stable_iterator ScopeDepth;
    unsigned Index;
  };

  CodeGenModule &CGM;  // Per-module state.
  const TargetInfo &Target;

  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
  CGBuilderTy Builder;

  /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
  /// This excludes BlockDecls.
  const Decl *CurFuncDecl;
  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
  const Decl *CurCodeDecl;
  const CGFunctionInfo *CurFnInfo;
  QualType FnRetTy;
  llvm::Function *CurFn;

  /// CurGD - The GlobalDecl for the current function being compiled.
  GlobalDecl CurGD;

  /// PrologueCleanupDepth - The cleanup depth enclosing all the
  /// cleanups associated with the parameters.
  EHScopeStack::stable_iterator PrologueCleanupDepth;

  /// ReturnBlock - Unified return block.
  JumpDest ReturnBlock;

  /// ReturnValue - The temporary alloca to hold the return value. This is null
  /// iff the function has no return value.
  llvm::Value *ReturnValue;

  /// AllocaInsertPt - This is an instruction in the entry block before which
  /// we prefer to insert allocas.
  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;

  /// BoundsChecking - Emit run-time bounds checks. Higher values mean
  /// potentially higher performance penalties.
  unsigned char BoundsChecking;

  /// \brief Whether any type-checking sanitizers are enabled. If \c false,
  /// calls to EmitTypeCheck can be skipped.
  bool SanitizePerformTypeCheck;

  /// In ARC, whether we should autorelease the return value.
  bool AutoreleaseResult;

  const CodeGen::CGBlockInfo *BlockInfo;
  llvm::Value *BlockPointer;

  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField;

  /// \brief A mapping from NRVO variables to the flags used to indicate
  /// when the NRVO has been applied to this variable.
  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;

  EHScopeStack EHStack;

  /// i32s containing the indexes of the cleanup destinations.
  llvm::AllocaInst *NormalCleanupDest;

  unsigned NextCleanupDestIndex;

  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
  CGBlockInfo *FirstBlockInfo;

  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
  llvm::BasicBlock *EHResumeBlock;

  /// The exception slot.  All landing pads write the current exception pointer
  /// into this alloca.
  llvm::Value *ExceptionSlot;

  /// The selector slot.  Under the MandatoryCleanup model, all landing pads
  /// write the current selector value into this alloca.
  llvm::AllocaInst *EHSelectorSlot;

  /// Emits a landing pad for the current EH stack.
  llvm::BasicBlock *EmitLandingPad();

  llvm::BasicBlock *getInvokeDestImpl();

  template <class T>
  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
    return DominatingValue<T>::save(*this, value);
  }

public:
  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
  /// rethrows.
  SmallVector<llvm::Value*, 8> ObjCEHValueStack;

  /// A class controlling the emission of a finally block.
  class FinallyInfo {
    /// Where the catchall's edge through the cleanup should go.
    JumpDest RethrowDest;

    /// A function to call to enter the catch.
    llvm::Constant *BeginCatchFn;

    /// An i1 variable indicating whether or not the @finally is
    /// running for an exception.
    llvm::AllocaInst *ForEHVar;

    /// An i8* variable into which the exception pointer to rethrow
    /// has been saved.
    llvm::AllocaInst *SavedExnVar;

  public:
    void enter(CodeGenFunction &CGF, const Stmt *Finally,
               llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
               llvm::Constant *rethrowFn);
    void exit(CodeGenFunction &CGF);
  };

  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression.  Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class A0>
  void pushFullExprCleanup(CleanupKind kind, A0 a0) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch())
      return EHStack.pushCleanup<T>(kind, a0);

    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);

    typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
    EHStack.pushCleanup<CleanupType>(kind, a0_saved);
    initFullExprCleanup();
  }

  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression.  Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class A0, class A1>
  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch())
      return EHStack.pushCleanup<T>(kind, a0, a1);

    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);

    typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
    initFullExprCleanup();
  }

  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression.  Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class A0, class A1, class A2>
  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch()) {
      return EHStack.pushCleanup<T>(kind, a0, a1, a2);
    }

    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
    typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);

    typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
    initFullExprCleanup();
  }

  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression.  Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class A0, class A1, class A2, class A3>
  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch()) {
      return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
    }

    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
    typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
    typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);

    typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
                                     a2_saved, a3_saved);
    initFullExprCleanup();
  }
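
  // Illustrative usage sketch (not part of the original header), reusing the
  // hypothetical CallFreeCleanup from earlier: inside a conditionally-evaluated
  // subexpression the argument is saved through DominatingValue<> and the
  // cleanup is set up by initFullExprCleanup(); otherwise this is equivalent
  // to a plain EHStack.pushCleanup call.
  //
  //   CGF.pushFullExprCleanup<CallFreeCleanup>(NormalAndEHCleanup, ptr);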

  /// Set up the last cleanup that was pushed as a conditional
  /// full-expression cleanup.
  void initFullExprCleanup();

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object destructor of an object of the given type at the
  /// given address.  Does nothing if T is not a C++ class type with a
  /// non-trivial destructor.
  void PushDestructorCleanup(QualType T, llvm::Value *Addr);

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object variant of the given destructor on the object at
  /// the given address.
  void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
                             llvm::Value *Addr);

  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
  /// process all branch fixups.
  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);

  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
  /// The block cannot be reactivated.  Pops it if it's the top of the
  /// stack.
  ///
  /// \param DominatingIP - An instruction which is known to
  ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and
  ///   the point at which the cleanup comes into scope.
  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                              llvm::Instruction *DominatingIP);

  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
  /// Cannot be used to resurrect a deactivated cleanup.
  ///
  /// \param DominatingIP - An instruction which is known to
  ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and
  ///   the point at which the cleanup comes into scope.
  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                            llvm::Instruction *DominatingIP);

  /// \brief Enters a new scope for capturing cleanups, all of which
  /// will be executed once the scope is exited.
  class RunCleanupsScope {
    EHScopeStack::stable_iterator CleanupStackDepth;
    bool OldDidCallStackSave;
    bool PerformCleanup;

    RunCleanupsScope(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
    void operator=(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;

  protected:
    CodeGenFunction& CGF;

  public:
    /// \brief Enter a new cleanup scope.
    explicit RunCleanupsScope(CodeGenFunction &CGF)
      : PerformCleanup(true), CGF(CGF)
    {
      CleanupStackDepth = CGF.EHStack.stable_begin();
      OldDidCallStackSave = CGF.DidCallStackSave;
      CGF.DidCallStackSave = false;
    }

    /// \brief Exit this cleanup scope, emitting any accumulated
    /// cleanups.
    ~RunCleanupsScope() {
      if (PerformCleanup) {
        CGF.DidCallStackSave = OldDidCallStackSave;
        CGF.PopCleanupBlocks(CleanupStackDepth);
      }
    }

    /// \brief Determine whether this scope requires any cleanups.
    bool requiresCleanups() const {
      return CGF.EHStack.stable_begin() != CleanupStackDepth;
    }

    /// \brief Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    void ForceCleanup() {
      assert(PerformCleanup && "Already forced cleanup");
      CGF.DidCallStackSave = OldDidCallStackSave;
      CGF.PopCleanupBlocks(CleanupStackDepth);
      PerformCleanup = false;
    }
  };
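
  // Illustrative usage sketch (not part of the original header): emitting a
  // region under an RAII cleanup scope.
  //
  //   {
  //     CodeGenFunction::RunCleanupsScope Scope(CGF);
  //     // ... emit declarations and statements; any cleanups they push ...
  //   } // ... are emitted here when Scope is destroyed (or via ForceCleanup()).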

  class LexicalScope: protected RunCleanupsScope {
    SourceRange Range;
    bool PopDebugStack;

    LexicalScope(const LexicalScope &) LLVM_DELETED_FUNCTION;
    void operator=(const LexicalScope &) LLVM_DELETED_FUNCTION;

  public:
    /// \brief Enter a new cleanup scope.
    explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
      : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
      if (CGDebugInfo *DI = CGF.getDebugInfo())
        DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
    }

    /// \brief Exit this cleanup scope, emitting any accumulated
    /// cleanups.
    ~LexicalScope() {
      if (PopDebugStack) {
        CGDebugInfo *DI = CGF.getDebugInfo();
        if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
      }
    }

    /// \brief Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    void ForceCleanup() {
      RunCleanupsScope::ForceCleanup();
      if (CGDebugInfo *DI = CGF.getDebugInfo()) {
        DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
        PopDebugStack = false;
      }
    }
  };


  /// PopCleanupBlocks - Takes the old cleanup stack size and emits
  /// the cleanup blocks that have been added.
  void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);

  void ResolveBranchFixups(llvm::BasicBlock *Target);

  /// The given basic block lies in the current EH scope, but may be a
  /// target of a potentially scope-crossing jump; get a stable handle
  /// to which we can perform this jump later.
  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
    return JumpDest(Target,
                    EHStack.getInnermostNormalCleanup(),
                    NextCleanupDestIndex++);
  }

  /// The given basic block lies in the current EH scope, but may be a
  /// target of a potentially scope-crossing jump; get a stable handle
  /// to which we can perform this jump later.
  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
    return getJumpDestInCurrentScope(createBasicBlock(Name));
  }

  /// EmitBranchThroughCleanup - Emit a branch from the current insert
  /// block through the normal cleanup handling code (if any) and then
  /// on to \arg Dest.
  void EmitBranchThroughCleanup(JumpDest Dest);
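
  // Illustrative usage sketch (not part of the original header): a
  // 'break'-style jump out of a region with cleanups is emitted by creating
  // the destination up front and later branching through the cleanup
  // machinery.  "for.end" is just an example block name.
  //
  //   CodeGenFunction::JumpDest LoopExit =
  //       CGF.getJumpDestInCurrentScope("for.end");
  //   // ... later, possibly from inside nested cleanup scopes ...
  //   CGF.EmitBranchThroughCleanup(LoopExit);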

  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
  /// specified destination obviously has no cleanups to run.  'false' is always
  /// a conservatively correct answer for this method.
  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;

  /// popCatchScope - Pops the catch scope at the top of the EHScope
  /// stack, emitting any required code (other than the catch handlers
  /// themselves).
  void popCatchScope();

  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);

  /// An object to manage conditionally-evaluated expressions.
  class ConditionalEvaluation {
    llvm::BasicBlock *StartBB;

  public:
    ConditionalEvaluation(CodeGenFunction &CGF)
      : StartBB(CGF.Builder.GetInsertBlock()) {}

    void begin(CodeGenFunction &CGF) {
      assert(CGF.OutermostConditional != this);
      if (!CGF.OutermostConditional)
        CGF.OutermostConditional = this;
    }

    void end(CodeGenFunction &CGF) {
      assert(CGF.OutermostConditional != 0);
      if (CGF.OutermostConditional == this)
        CGF.OutermostConditional = 0;
    }

    /// Returns a block which will be executed prior to each
    /// evaluation of the conditional code.
    llvm::BasicBlock *getStartingBlock() const {
      return StartBB;
    }
  };

  /// isInConditionalBranch - Return true if we're currently emitting
  /// one branch or the other of a conditional expression.
  bool isInConditionalBranch() const { return OutermostConditional != 0; }

  void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
    assert(isInConditionalBranch());
    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
    new llvm::StoreInst(value, addr, &block->back());
  }

  /// An RAII object to record that we're evaluating a statement
  /// expression.
  class StmtExprEvaluation {
    CodeGenFunction &CGF;

    /// We have to save the outermost conditional: cleanups in a
    /// statement expression aren't conditional just because the
    /// StmtExpr is.
    ConditionalEvaluation *SavedOutermostConditional;

  public:
    StmtExprEvaluation(CodeGenFunction &CGF)
      : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
      CGF.OutermostConditional = 0;
    }

    ~StmtExprEvaluation() {
      CGF.OutermostConditional = SavedOutermostConditional;
      CGF.EnsureInsertPoint();
    }
  };

  /// An object which temporarily prevents a value from being
  /// destroyed by aggressive peephole optimizations that assume that
  /// all uses of a value have been realized in the IR.
  class PeepholeProtection {
    llvm::Instruction *Inst;
    friend class CodeGenFunction;

  public:
    PeepholeProtection() : Inst(0) {}
  };

  /// A non-RAII class containing all the information about a bound
  /// opaque value.  OpaqueValueMapping, below, is a RAII wrapper for
  /// this which makes individual mappings very simple; using this
  /// class directly is useful when you have a variable number of
  /// opaque values or don't want the RAII functionality for some
  /// reason.
  class OpaqueValueMappingData {
    const OpaqueValueExpr *OpaqueValue;
    bool BoundLValue;
    CodeGenFunction::PeepholeProtection Protection;

    OpaqueValueMappingData(const OpaqueValueExpr *ov,
                           bool boundLValue)
      : OpaqueValue(ov), BoundLValue(boundLValue) {}
  public:
    OpaqueValueMappingData() : OpaqueValue(0) {}

    static bool shouldBindAsLValue(const Expr *expr) {
      // gl-values should be bound as l-values for obvious reasons.
      // Records should be bound as l-values because IR generation
      // always keeps them in memory.  Expressions of function type
      // act exactly like l-values but are formally required to be
      // r-values in C.
      return expr->isGLValue() ||
             expr->getType()->isRecordType() ||
             expr->getType()->isFunctionType();
    }

    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
                                       const OpaqueValueExpr *ov,
                                       const Expr *e) {
      if (shouldBindAsLValue(ov))
        return bind(CGF, ov, CGF.EmitLValue(e));
      return bind(CGF, ov, CGF.EmitAnyExpr(e));
    }

    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
                                       const OpaqueValueExpr *ov,
                                       const LValue &lv) {
      assert(shouldBindAsLValue(ov));
      CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
      return OpaqueValueMappingData(ov, true);
    }

    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
                                       const OpaqueValueExpr *ov,
                                       const RValue &rv) {
      assert(!shouldBindAsLValue(ov));
      CGF.OpaqueRValues.insert(std::make_pair(ov, rv));

      OpaqueValueMappingData data(ov, false);

      // Work around an extremely aggressive peephole optimization in
      // EmitScalarConversion which assumes that all other uses of a
      // value are extant.
      data.Protection = CGF.protectFromPeepholes(rv);

      return data;
    }

    bool isValid() const { return OpaqueValue != 0; }
    void clear() { OpaqueValue = 0; }

    void unbind(CodeGenFunction &CGF) {
      assert(OpaqueValue && "no data to unbind!");

      if (BoundLValue) {
        CGF.OpaqueLValues.erase(OpaqueValue);
      } else {
        CGF.OpaqueRValues.erase(OpaqueValue);
        CGF.unprotectFromPeepholes(Protection);
      }
    }
  };

  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
  class OpaqueValueMapping {
    CodeGenFunction &CGF;
    OpaqueValueMappingData Data;

  public:
    static bool shouldBindAsLValue(const Expr *expr) {
      return OpaqueValueMappingData::shouldBindAsLValue(expr);
    }

    /// Build the opaque value mapping for the given conditional
    /// operator if it's the GNU ?: extension.  This is a common
    /// enough pattern that the convenience operator is really
    /// helpful.
    ///
    OpaqueValueMapping(CodeGenFunction &CGF,
                       const AbstractConditionalOperator *op) : CGF(CGF) {
      if (isa<ConditionalOperator>(op))
        // Leave Data empty.
        return;

      const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
      Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
                                          e->getCommon());
    }

    OpaqueValueMapping(CodeGenFunction &CGF,
                       const OpaqueValueExpr *opaqueValue,
                       LValue lvalue)
      : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
    }

    OpaqueValueMapping(CodeGenFunction &CGF,
                       const OpaqueValueExpr *opaqueValue,
                       RValue rvalue)
      : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
    }

    void pop() {
      Data.unbind(CGF);
      Data.clear();
    }

    ~OpaqueValueMapping() {
      if (Data.isValid()) Data.unbind(CGF);
    }
  };
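
  // Illustrative usage sketch (not part of the original header): binding the
  // opaque value of a GNU "x ?: y" expression while its branches are emitted.
  // Here E is assumed to be the BinaryConditionalOperator being emitted.
  //
  //   {
  //     CodeGenFunction::OpaqueValueMapping binding(CGF, E);
  //     // ... emit the condition and both branches; uses of the
  //     // OpaqueValueExpr resolve through the mapping ...
  //   } // unbound here, or earlier via binding.pop().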

  /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
  /// number that holds the value.
  unsigned getByRefValueLLVMField(const ValueDecl *VD) const;

  /// BuildBlockByrefAddress - Computes the address of the variable which is
  /// declared as __block.
  llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
                                      const VarDecl *V);
private:
  CGDebugInfo *DebugInfo;
  bool DisableDebugInfo;

  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
  /// calling llvm.stacksave for multiple VLAs in the same scope.
  bool DidCallStackSave;

  /// IndirectBranch - The first time an indirect goto is seen we create a block
  /// with an indirect branch.  Every time we see the address of a label taken,
  /// we add the label to the indirect goto.  Every subsequent indirect goto is
  /// codegen'd as a jump to the IndirectBranch's basic block.
  llvm::IndirectBrInst *IndirectBranch;

  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
  /// decls.
  typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
  DeclMapTy LocalDeclMap;

  /// LabelMap - This keeps track of the LLVM basic block for each C label.
  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;

  // BreakContinueStack - This keeps track of where break and continue
  // statements should jump to.
  struct BreakContinue {
    BreakContinue(JumpDest Break, JumpDest Continue)
      : BreakBlock(Break), ContinueBlock(Continue) {}

    JumpDest BreakBlock;
    JumpDest ContinueBlock;
  };
  SmallVector<BreakContinue, 8> BreakContinueStack;

  /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
  /// if the current context is not in a switch.
  llvm::SwitchInst *SwitchInsn;

  /// CaseRangeBlock - This block holds the condition check for the last case
  /// statement range in the current switch instruction.
  llvm::BasicBlock *CaseRangeBlock;

  /// OpaqueLValues - Keeps track of the current set of opaque value
  /// expressions.
  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;

  // VLASizeMap - This keeps track of the associated size for each VLA type.
  // We track this by the size expression rather than the type itself because
  // in certain situations, like a const qualifier applied to a VLA typedef,
  // multiple VLA types can share the same size expression.
  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
  // enter/leave scopes.
  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;

  /// A block containing a single 'unreachable' instruction.  Created
  /// lazily by getUnreachableBlock().
  llvm::BasicBlock *UnreachableBlock;

  /// CXXThisDecl - When generating code for a C++ member function,
  /// this will hold the implicit 'this' declaration.
  ImplicitParamDecl *CXXABIThisDecl;
  llvm::Value *CXXABIThisValue;
  llvm::Value *CXXThisValue;

  /// CXXVTTDecl - When generating code for a base object constructor or
  /// base object destructor with virtual bases, this will hold the implicit
  /// VTT parameter.
  ImplicitParamDecl *CXXVTTDecl;
  llvm::Value *CXXVTTValue;

  /// OutermostConditional - Points to the outermost active
  /// conditional control.  This is used so that we know if a
  /// temporary should be destroyed conditionally.
  ConditionalEvaluation *OutermostConditional;


  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
  /// type as well as the field number that contains the actual data.
  llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
                                              unsigned> > ByRefValueInfo;

  llvm::BasicBlock *TerminateLandingPad;
  llvm::BasicBlock *TerminateHandler;
  llvm::BasicBlock *TrapBB;

  /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
  /// In the kernel metadata node, reference the kernel function and metadata
  /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
  /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
  ///   "work_group_size_hint", and three 32-bit integers X, Y and Z.
  /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
  ///   "reqd_work_group_size", and three 32-bit integers X, Y and Z.
  void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                llvm::Function *Fn);

public:
  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
  ~CodeGenFunction();

  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
  ASTContext &getContext() const { return CGM.getContext(); }
  /// Returns true if DebugInfo is actually initialized.
  bool maybeInitializeDebugInfo() {
    if (CGM.getModuleDebugInfo()) {
      DebugInfo = CGM.getModuleDebugInfo();
      return true;
    }
    return false;
  }
  CGDebugInfo *getDebugInfo() {
    if (DisableDebugInfo)
      return NULL;
    return DebugInfo;
  }
  void disableDebugInfo() { DisableDebugInfo = true; }
  void enableDebugInfo() { DisableDebugInfo = false; }

  bool shouldUseFusedARCCalls() {
    return CGM.getCodeGenOpts().OptimizationLevel == 0;
  }

  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }

  /// Returns a pointer to the function's exception object and selector slots,
  /// which are assigned in every landing pad.
  llvm::Value *getExceptionSlot();
  llvm::Value *getEHSelectorSlot();

  /// Returns the contents of the function's exception object and selector
  /// slots.
  llvm::Value *getExceptionFromSlot();
  llvm::Value *getSelectorFromSlot();

  llvm::Value *getNormalCleanupDestSlot();

  llvm::BasicBlock *getUnreachableBlock() {
    if (!UnreachableBlock) {
      UnreachableBlock = createBasicBlock("unreachable");
      new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
    }
    return UnreachableBlock;
  }

  llvm::BasicBlock *getInvokeDest() {
    if (!EHStack.requiresLandingPad()) return 0;
    return getInvokeDestImpl();
  }

  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }

  //===--------------------------------------------------------------------===//
  //                                  Cleanups
  //===--------------------------------------------------------------------===//

  typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);

  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                        llvm::Value *arrayEndPointer,
                                        QualType elementType,
                                        Destroyer *destroyer);
  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                      llvm::Value *arrayEnd,
                                      QualType elementType,
                                      Destroyer *destroyer);

  void pushDestroy(QualType::DestructionKind dtorKind,
                   llvm::Value *addr, QualType type);
  void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
                   Destroyer *destroyer, bool useEHCleanupForArray);
  void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
                   bool useEHCleanupForArray);
  llvm::Function *generateDestroyHelper(llvm::Constant *addr,
                                        QualType type,
                                        Destroyer *destroyer,
                                        bool useEHCleanupForArray);
  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
                        QualType type, Destroyer *destroyer,
                        bool checkZeroLength, bool useEHCleanup);

  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);

  /// Determines whether an EH cleanup is required to destroy a type
  /// with the given destruction kind.
  bool needsEHCleanup(QualType::DestructionKind kind) {
    switch (kind) {
    case QualType::DK_none:
      return false;
    case QualType::DK_cxx_destructor:
    case QualType::DK_objc_weak_lifetime:
      return getLangOpts().Exceptions;
    case QualType::DK_objc_strong_lifetime:
      return getLangOpts().Exceptions &&
             CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
    }
    llvm_unreachable("bad destruction kind");
  }

  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
    return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
  }
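
  // Illustrative sketch (not part of the original header): the
  // pushDestroy(DestructionKind, ...) overload above is essentially a
  // convenience wrapper that could be phrased in terms of these helpers.
  // "type" and "addr" are assumed to be the QualType being destroyed and the
  // address of its storage.
  //
  //   QualType::DestructionKind dtorKind = type.isDestructedType();
  //   if (dtorKind != QualType::DK_none)
  //     CGF.pushDestroy(CGF.getCleanupKind(dtorKind), addr, type,
  //                     CGF.getDestroyer(dtorKind),
  //                     CGF.needsEHCleanup(dtorKind));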
1315
1316  //===--------------------------------------------------------------------===//
1317  //                                  Objective-C
1318  //===--------------------------------------------------------------------===//
1319
1320  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1321
1322  void StartObjCMethod(const ObjCMethodDecl *MD,
1323                       const ObjCContainerDecl *CD,
1324                       SourceLocation StartLoc);
1325
1326  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1327  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1328                          const ObjCPropertyImplDecl *PID);
1329  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1330                              const ObjCPropertyImplDecl *propImpl,
1331                              const ObjCMethodDecl *GetterMothodDecl,
1332                              llvm::Constant *AtomicHelperFn);
1333
1334  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1335                                  ObjCMethodDecl *MD, bool ctor);
1336
1337  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1338  /// for the given property.
1339  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1340                          const ObjCPropertyImplDecl *PID);
1341  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1342                              const ObjCPropertyImplDecl *propImpl,
1343                              llvm::Constant *AtomicHelperFn);
1344  bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1345  bool IvarTypeWithAggrGCObjects(QualType Ty);
1346
1347  //===--------------------------------------------------------------------===//
1348  //                                  Block Bits
1349  //===--------------------------------------------------------------------===//
1350
1351  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1352  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
1353  static void destroyBlockInfos(CGBlockInfo *info);
1354  llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1355                                           const CGBlockInfo &Info,
1356                                           llvm::StructType *,
1357                                           llvm::Constant *BlockVarLayout);
1358
1359  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1360                                        const CGBlockInfo &Info,
1361                                        const Decl *OuterFuncDecl,
1362                                        const DeclMapTy &ldm,
1363                                        bool IsLambdaConversionToBlock);
1364
1365  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1366  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1367  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1368                                             const ObjCPropertyImplDecl *PID);
1369  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1370                                             const ObjCPropertyImplDecl *PID);
1371  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1372
1373  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1374
1375  class AutoVarEmission;
1376
1377  void emitByrefStructureInit(const AutoVarEmission &emission);
1378  void enterByrefCleanup(const AutoVarEmission &emission);
1379
1380  llvm::Value *LoadBlockStruct() {
1381    assert(BlockPointer && "no block pointer set!");
1382    return BlockPointer;
1383  }
1384
1385  void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1386  void AllocateBlockDecl(const DeclRefExpr *E);
1387  llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1388  llvm::Type *BuildByRefType(const VarDecl *var);
1389
1390  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1391                    const CGFunctionInfo &FnInfo);
1392  void StartFunction(GlobalDecl GD, QualType RetTy,
1393                     llvm::Function *Fn,
1394                     const CGFunctionInfo &FnInfo,
1395                     const FunctionArgList &Args,
1396                     SourceLocation StartLoc);
1397
1398  void EmitConstructorBody(FunctionArgList &Args);
1399  void EmitDestructorBody(FunctionArgList &Args);
1400  void EmitFunctionBody(FunctionArgList &Args);
1401
1402  void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
1403                                  CallArgList &CallArgs);
1404  void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
1405  void EmitLambdaBlockInvokeBody();
1406  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1407  void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
1408
1409  /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1410  /// emission when possible.
1411  void EmitReturnBlock();
1412
1413  /// FinishFunction - Complete IR generation of the current function. It is
1414  /// legal to call this function even if there is no current insertion point.
1415  void FinishFunction(SourceLocation EndLoc=SourceLocation());
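
  // Illustrative sketch (not from this file): callers typically bracket body
  // emission between StartFunction and FinishFunction.  GD, RetTy, Fn, FnInfo,
  // Args and Loc are assumed to be in scope:
  //
  //   CGF.StartFunction(GD, RetTy, Fn, FnInfo, Args, Loc);
  //   ...emit the function body...
  //   CGF.FinishFunction();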
1416
1417  /// GenerateThunk - Generate a thunk for the given method.
1418  void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1419                     GlobalDecl GD, const ThunkInfo &Thunk);
1420
1421  void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1422                            GlobalDecl GD, const ThunkInfo &Thunk);
1423
1424  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1425                        FunctionArgList &Args);
1426
1427  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
1428                               ArrayRef<VarDecl *> ArrayIndexes);
1429
1430  /// InitializeVTablePointer - Initialize the vtable pointer of the given
1431  /// subobject.
1432  ///
1433  void InitializeVTablePointer(BaseSubobject Base,
1434                               const CXXRecordDecl *NearestVBase,
1435                               CharUnits OffsetFromNearestVBase,
1436                               llvm::Constant *VTable,
1437                               const CXXRecordDecl *VTableClass);
1438
1439  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1440  void InitializeVTablePointers(BaseSubobject Base,
1441                                const CXXRecordDecl *NearestVBase,
1442                                CharUnits OffsetFromNearestVBase,
1443                                bool BaseIsNonVirtualPrimaryBase,
1444                                llvm::Constant *VTable,
1445                                const CXXRecordDecl *VTableClass,
1446                                VisitedVirtualBasesSetTy& VBases);
1447
1448  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1449
1450  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1451  /// to by This.
1452  llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
1453
1454  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1455  /// given phase of destruction for a destructor.  The end result
1456  /// should call destructors on members and base classes in reverse
1457  /// order of their construction.
1458  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1459
1460  /// ShouldInstrumentFunction - Return true if the current function should be
1461  /// instrumented with __cyg_profile_func_* calls
1462  bool ShouldInstrumentFunction();
1463
1464  /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1465  /// instrumentation function with the current function and the call site, if
1466  /// function instrumentation is enabled.
1467  void EmitFunctionInstrumentation(const char *Fn);
1468
1469  /// EmitMCountInstrumentation - Emit call to .mcount.
1470  void EmitMCountInstrumentation();
1471
1472  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1473  /// arguments for the given function. This is also responsible for naming the
1474  /// LLVM function arguments.
1475  void EmitFunctionProlog(const CGFunctionInfo &FI,
1476                          llvm::Function *Fn,
1477                          const FunctionArgList &Args);
1478
1479  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1480  /// given temporary.
1481  void EmitFunctionEpilog(const CGFunctionInfo &FI);
1482
1483  /// EmitStartEHSpec - Emit the start of the exception spec.
1484  void EmitStartEHSpec(const Decl *D);
1485
1486  /// EmitEndEHSpec - Emit the end of the exception spec.
1487  void EmitEndEHSpec(const Decl *D);
1488
1489  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1490  llvm::BasicBlock *getTerminateLandingPad();
1491
1492  /// getTerminateHandler - Return a handler (not a landing pad, just
1493  /// a catch handler) that just calls terminate.  This is used when
1494  /// a terminate scope encloses a try.
1495  llvm::BasicBlock *getTerminateHandler();
1496
1497  llvm::Type *ConvertTypeForMem(QualType T);
1498  llvm::Type *ConvertType(QualType T);
1499  llvm::Type *ConvertType(const TypeDecl *T) {
1500    return ConvertType(getContext().getTypeDeclType(T));
1501  }
1502
1503  /// LoadObjCSelf - Load the value of self. This function is only valid while
1504  /// generating code for an Objective-C method.
1505  llvm::Value *LoadObjCSelf();
1506
1507  /// TypeOfSelfObject - Return type of object that this self represents.
1508  QualType TypeOfSelfObject();
1509
1510  /// hasAggregateLLVMType - Return true if the specified AST type will map into
1511  /// an aggregate LLVM type or is void.
1512  static bool hasAggregateLLVMType(QualType T);
1513
1514  /// createBasicBlock - Create an LLVM basic block.
1515  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
1516                                     llvm::Function *parent = 0,
1517                                     llvm::BasicBlock *before = 0) {
1518#ifdef NDEBUG
1519    return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1520#else
1521    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1522#endif
1523  }
1524
1525  /// getJumpDestForLabel - Return the JumpDest that the specified
1526  /// label maps to.
1527  JumpDest getJumpDestForLabel(const LabelDecl *S);
1528
1529  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1530  /// another basic block, simplify it. This assumes that no other code could
1531  /// potentially reference the basic block.
1532  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1533
1534  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1535  /// adding a fall-through branch from the current insert block if
1536  /// necessary. It is legal to call this function even if there is no current
1537  /// insertion point.
1538  ///
1539  /// IsFinished - If true, indicates that the caller has finished emitting
1540  /// branches to the given block and does not expect to emit code into it. This
1541  /// means the block can be ignored if it is unreachable.
1542  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1543
1544  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
1545  /// near its uses, and leave the insertion point in it.
1546  void EmitBlockAfterUses(llvm::BasicBlock *BB);
1547
1548  /// EmitBranch - Emit a branch to the specified basic block from the current
1549  /// insert block, taking care to avoid creation of branches from dummy
1550  /// blocks. It is legal to call this function even if there is no current
1551  /// insertion point.
1552  ///
1553  /// This function clears the current insertion point. The caller should follow
1554  /// calls to this function with calls to Emit*Block prior to generating new
1555  /// code.
1556  void EmitBranch(llvm::BasicBlock *Block);
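
  // Illustrative sketch (not from this file): a common pattern for emitting a
  // new block that is reached by fall-through from the current one:
  //
  //   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cont");
  //   ...emit code that may also branch to ContBlock...
  //   CGF.EmitBlock(ContBlock);  // fall through into ContBlock and make it
  //                              // the current insertion point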
1557
1558  /// HaveInsertPoint - True if an insertion point is defined. If not, this
1559  /// indicates that the current code being emitted is unreachable.
1560  bool HaveInsertPoint() const {
1561    return Builder.GetInsertBlock() != 0;
1562  }
1563
1564  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1565  /// emitted IR has a place to go. Note that by definition, if this function
1566  /// creates a block then that block is unreachable; callers may do better to
1567  /// detect when no insertion point is defined and simply skip IR generation.
1568  void EnsureInsertPoint() {
1569    if (!HaveInsertPoint())
1570      EmitBlock(createBasicBlock());
1571  }
1572
1573  /// ErrorUnsupported - Print out an error that codegen doesn't support the
1574  /// specified stmt yet.
1575  void ErrorUnsupported(const Stmt *S, const char *Type,
1576                        bool OmitOnError=false);
1577
1578  //===--------------------------------------------------------------------===//
1579  //                                  Helpers
1580  //===--------------------------------------------------------------------===//
1581
1582  LValue MakeAddrLValue(llvm::Value *V, QualType T,
1583                        CharUnits Alignment = CharUnits()) {
1584    return LValue::MakeAddr(V, T, Alignment, getContext(),
1585                            CGM.getTBAAInfo(T));
1586  }
1587
1588  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
1589    CharUnits Alignment;
1590    if (!T->isIncompleteType())
1591      Alignment = getContext().getTypeAlignInChars(T);
1592    return LValue::MakeAddr(V, T, Alignment, getContext(),
1593                            CGM.getTBAAInfo(T));
1594  }
1595
1596  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
1597  /// block. The caller is responsible for setting an appropriate alignment on
1598  /// the alloca.
1599  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
1600                                     const Twine &Name = "tmp");
1601
1602  /// InitTempAlloca - Provide an initial value for the given alloca.
1603  void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1604
1605  /// CreateIRTemp - Create a temporary IR object of the given type, with
1606  /// appropriate alignment. This routine should only be used when a temporary
1607  /// value needs to be stored into an alloca (for example, to avoid explicit
1608  /// PHI construction), but the type is the IR type, not the type appropriate
1609  /// for storing in memory.
1610  llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
1611
1612  /// CreateMemTemp - Create a temporary memory object of the given type, with
1613  /// appropriate alignment.
1614  llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
1615
1616  /// CreateAggTemp - Create a temporary memory object for the given
1617  /// aggregate type.
1618  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
1619    CharUnits Alignment = getContext().getTypeAlignInChars(T);
1620    return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
1621                                 T.getQualifiers(),
1622                                 AggValueSlot::IsNotDestructed,
1623                                 AggValueSlot::DoesNotNeedGCBarriers,
1624                                 AggValueSlot::IsNotAliased);
1625  }
1626
1627  /// Emit a cast to void* in the appropriate address space.
1628  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1629
1630  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1631  /// expression and compare the result against zero, returning an Int1Ty value.
1632  llvm::Value *EvaluateExprAsBool(const Expr *E);
1633
1634  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1635  void EmitIgnoredExpr(const Expr *E);
1636
1637  /// EmitAnyExpr - Emit code to compute the specified expression which can have
1638  /// any type.  The result is returned as an RValue struct.  If this is an
1639  /// aggregate expression, the aggSlot argument indicates where
1640  /// the result should be returned.
1641  ///
1642  /// \param ignoreResult True if the resulting value isn't used.
1643  RValue EmitAnyExpr(const Expr *E,
1644                     AggValueSlot aggSlot = AggValueSlot::ignored(),
1645                     bool ignoreResult = false);
1646
1647  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1648  // or the value of the expression, depending on how va_list is defined.
1649  llvm::Value *EmitVAListRef(const Expr *E);
1650
1651  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
1652  /// always be accessible even if no aggregate location is provided.
1653  RValue EmitAnyExprToTemp(const Expr *E);
1654
1655  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
1656  /// arbitrary expression into the given memory location.
1657  void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1658                        Qualifiers Quals, bool IsInitializer);
1659
1660  /// EmitExprAsInit - Emits the code necessary to initialize a
1661  /// location in memory with the given initializer.
1662  void EmitExprAsInit(const Expr *init, const ValueDecl *D,
1663                      LValue lvalue, bool capturedByInit);
1664
1665  /// EmitAggregateAssign - Emit an aggregate assignment.
1666  ///
1667  /// The difference from EmitAggregateCopy is that tail padding is not copied.
1668  /// This is required for correctness when assigning non-POD structures in C++.
1669  void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1670                           QualType EltTy, bool isVolatile=false,
1671                           CharUnits Alignment = CharUnits::Zero()) {
1672    EmitAggregateCopy(DestPtr, SrcPtr, EltTy, isVolatile, Alignment, true);
1673  }
1674
1675  /// EmitAggregateCopy - Emit an aggregate copy.
1676  ///
1677  /// \param isVolatile - True iff either the source or the destination is
1678  /// volatile.
1679  /// \param isAssignment - If false, allow padding to be copied.  This often
1680  /// yields more efficient code.
1681  void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1682                         QualType EltTy, bool isVolatile=false,
1683                         CharUnits Alignment = CharUnits::Zero(),
1684                         bool isAssignment = false);
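
  // Illustrative sketch (not from this file): DestPtr, SrcPtr and Ty are
  // assumed to be in scope.
  //
  //   CGF.EmitAggregateCopy(DestPtr, SrcPtr, Ty);    // plain copy; padding
  //                                                  // may be copied
  //   CGF.EmitAggregateAssign(DestPtr, SrcPtr, Ty);  // assignment; tail
  //                                                  // padding is not copied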
1685
1686  /// StartBlock - Start a new block named N. If the insert block is a dummy
1687  /// block then reuse it.
1688  void StartBlock(const char *N);
1689
1690  /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
1691  llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
1692    return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
1693  }
1694
1695  /// GetAddrOfLocalVar - Return the address of a local variable.
1696  llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1697    llvm::Value *Res = LocalDeclMap[VD];
1698    assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1699    return Res;
1700  }
1701
1702  /// getOpaqueLValueMapping - Given an opaque value expression (which
1703  /// must be mapped to an l-value), return its mapping.
1704  const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1705    assert(OpaqueValueMapping::shouldBindAsLValue(e));
1706
1707    llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1708      it = OpaqueLValues.find(e);
1709    assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1710    return it->second;
1711  }
1712
1713  /// getOpaqueRValueMapping - Given an opaque value expression (which
1714  /// must be mapped to an r-value), return its mapping.
1715  const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1716    assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1717
1718    llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1719      it = OpaqueRValues.find(e);
1720    assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1721    return it->second;
1722  }
1723
1724  /// getAccessedFieldNo - Given an encoded value and a result number, return
1725  /// the input field number being accessed.
1726  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1727
1728  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1729  llvm::BasicBlock *GetIndirectGotoBlock();
1730
1731  /// EmitNullInitialization - Generate code to set a value of the given type to
1732  /// null. If the type contains data member pointers, they will be initialized
1733  /// to -1 in accordance with the Itanium C++ ABI.
1734  void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1735
1736  // EmitVAArg - Generate code to get an argument from the passed in pointer
1737  // and update it accordingly. The return value is a pointer to the argument.
1738  // FIXME: We should be able to get rid of this method and use the va_arg
1739  // instruction in LLVM instead once it works well enough.
1740  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1741
1742  /// emitArrayLength - Compute the length of an array, even if it's a
1743  /// VLA, and drill down to the base element type.
1744  llvm::Value *emitArrayLength(const ArrayType *arrayType,
1745                               QualType &baseType,
1746                               llvm::Value *&addr);
1747
1748  /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions in
1749  /// the given variably-modified type and store them in the VLASizeMap.
1750  ///
1751  /// This function can be called with a null (unreachable) insert point.
1752  void EmitVariablyModifiedType(QualType Ty);
1753
1754  /// getVLASize - Returns an LLVM value that corresponds to the size,
1755  /// in non-variably-sized elements, of a variable length array type,
1756  /// plus the largest non-variably-sized element type.  Assumes that
1757  /// the type has already been emitted with EmitVariablyModifiedType.
1758  std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
1759  std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
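
  // Illustrative sketch (not from this file): querying the size of a
  // variable length array type VAT after EmitVariablyModifiedType has run:
  //
  //   std::pair<llvm::Value*, QualType> VlaSize = CGF.getVLASize(VAT);
  //   llvm::Value *NumElements = VlaSize.first;   // element count
  //   QualType ElementType = VlaSize.second;      // base element type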
1760
1761  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
1762  /// generating code for a C++ member function.
1763  llvm::Value *LoadCXXThis() {
1764    assert(CXXThisValue && "no 'this' value for this function");
1765    return CXXThisValue;
1766  }
1767
1768  /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors that
1769  /// have virtual bases.
1770  llvm::Value *LoadCXXVTT() {
1771    assert(CXXVTTValue && "no VTT value for this function");
1772    return CXXVTTValue;
1773  }
1774
1775  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
1776  /// complete class to the given direct base.
1777  llvm::Value *
1778  GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1779                                        const CXXRecordDecl *Derived,
1780                                        const CXXRecordDecl *Base,
1781                                        bool BaseIsVirtual);
1782
1783  /// GetAddressOfBaseClass - This function will add the necessary delta to the
1784  /// load of 'this' and returns the address of the base class.
1785  llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1786                                     const CXXRecordDecl *Derived,
1787                                     CastExpr::path_const_iterator PathBegin,
1788                                     CastExpr::path_const_iterator PathEnd,
1789                                     bool NullCheckValue);
1790
1791  llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1792                                        const CXXRecordDecl *Derived,
1793                                        CastExpr::path_const_iterator PathBegin,
1794                                        CastExpr::path_const_iterator PathEnd,
1795                                        bool NullCheckValue);
1796
1797  llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
1798                                         const CXXRecordDecl *ClassDecl,
1799                                         const CXXRecordDecl *BaseClassDecl);
1800
1801  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1802                                      CXXCtorType CtorType,
1803                                      const FunctionArgList &Args);
1804  // It's important not to confuse this and the previous function. Delegating
1805  // constructors are the C++0x feature. The constructor delegate optimization
1806  // is used to reduce duplication in the base and complete constructors where
1807  // they are substantially the same.
1808  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1809                                        const FunctionArgList &Args);
1810  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1811                              bool ForVirtualBase, llvm::Value *This,
1812                              CallExpr::const_arg_iterator ArgBeg,
1813                              CallExpr::const_arg_iterator ArgEnd);
1814
1815  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1816                              llvm::Value *This, llvm::Value *Src,
1817                              CallExpr::const_arg_iterator ArgBeg,
1818                              CallExpr::const_arg_iterator ArgEnd);
1819
1820  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1821                                  const ConstantArrayType *ArrayTy,
1822                                  llvm::Value *ArrayPtr,
1823                                  CallExpr::const_arg_iterator ArgBeg,
1824                                  CallExpr::const_arg_iterator ArgEnd,
1825                                  bool ZeroInitialization = false);
1826
1827  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1828                                  llvm::Value *NumElements,
1829                                  llvm::Value *ArrayPtr,
1830                                  CallExpr::const_arg_iterator ArgBeg,
1831                                  CallExpr::const_arg_iterator ArgEnd,
1832                                  bool ZeroInitialization = false);
1833
1834  static Destroyer destroyCXXObject;
1835
1836  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1837                             bool ForVirtualBase, llvm::Value *This);
1838
1839  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
1840                               llvm::Value *NewPtr, llvm::Value *NumElements);
1841
1842  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
1843                        llvm::Value *Ptr);
1844
1845  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1846  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1847
1848  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1849                      QualType DeleteTy);
1850
1851  llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1852  llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1853  llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);
1854
1855  void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
1856  void EmitStdInitializerListCleanup(llvm::Value *loc,
1857                                     const InitListExpr *init);
1858
1859  /// \brief Situations in which we might emit a check for the suitability of a
1860  ///        pointer or glvalue.
1861  enum TypeCheckKind {
1862    /// Checking the operand of a load. Must be suitably sized and aligned.
1863    TCK_Load,
1864    /// Checking the destination of a store. Must be suitably sized and aligned.
1865    TCK_Store,
1866    /// Checking the bound value in a reference binding. Must be suitably sized
1867    /// and aligned, but is not required to refer to an object (until the
1868    /// reference is used), per core issue 453.
1869    TCK_ReferenceBinding,
1870    /// Checking the object expression in a non-static data member access. Must
1871    /// be an object within its lifetime.
1872    TCK_MemberAccess,
1873    /// Checking the 'this' pointer for a call to a non-static member function.
1874    /// Must be an object within its lifetime.
1875    TCK_MemberCall,
1876    /// Checking the 'this' pointer for a constructor call.
1877    TCK_ConstructorCall
1878  };
1879
1880  /// \brief Emit a check that \p V is the address of storage of the
1881  /// appropriate size and alignment for an object of type \p Type.
1882  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
1883                     QualType Type, CharUnits Alignment = CharUnits::Zero());
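
  // Illustrative sketch (not from this file): checking an address before
  // loading from it.  E and Addr are assumed to be in scope:
  //
  //   CGF.EmitTypeCheck(TCK_Load, E->getExprLoc(), Addr, E->getType());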
1884
1885  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1886                                       bool isInc, bool isPre);
1887  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1888                                         bool isInc, bool isPre);
1889  //===--------------------------------------------------------------------===//
1890  //                            Declaration Emission
1891  //===--------------------------------------------------------------------===//
1892
1893  /// EmitDecl - Emit a declaration.
1894  ///
1895  /// This function can be called with a null (unreachable) insert point.
1896  void EmitDecl(const Decl &D);
1897
1898  /// EmitVarDecl - Emit a local variable declaration.
1899  ///
1900  /// This function can be called with a null (unreachable) insert point.
1901  void EmitVarDecl(const VarDecl &D);
1902
1903  void EmitScalarInit(const Expr *init, const ValueDecl *D,
1904                      LValue lvalue, bool capturedByInit);
1905  void EmitScalarInit(llvm::Value *init, LValue lvalue);
1906
1907  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1908                             llvm::Value *Address);
1909
1910  /// EmitAutoVarDecl - Emit an auto variable declaration.
1911  ///
1912  /// This function can be called with a null (unreachable) insert point.
1913  void EmitAutoVarDecl(const VarDecl &D);
1914
1915  class AutoVarEmission {
1916    friend class CodeGenFunction;
1917
1918    const VarDecl *Variable;
1919
1920    /// The alignment of the variable.
1921    CharUnits Alignment;
1922
1923    /// The address of the alloca.  Null if the variable was emitted
1924    /// as a global constant.
1925    llvm::Value *Address;
1926
1927    llvm::Value *NRVOFlag;
1928
1929    /// True if the variable is a __block variable.
1930    bool IsByRef;
1931
1932    /// True if the variable is of aggregate type and has a constant
1933    /// initializer.
1934    bool IsConstantAggregate;
1935
1936    struct Invalid {};
1937    AutoVarEmission(Invalid) : Variable(0) {}
1938
1939    AutoVarEmission(const VarDecl &variable)
1940      : Variable(&variable), Address(0), NRVOFlag(0),
1941        IsByRef(false), IsConstantAggregate(false) {}
1942
1943    bool wasEmittedAsGlobal() const { return Address == 0; }
1944
1945  public:
1946    static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
1947
1948    /// Returns the address of the object within this declaration.
1949    /// Note that this does not chase the forwarding pointer for
1950    /// __block decls.
1951    llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
1952      if (!IsByRef) return Address;
1953
1954      return CGF.Builder.CreateStructGEP(Address,
1955                                         CGF.getByRefValueLLVMField(Variable),
1956                                         Variable->getNameAsString());
1957    }
1958  };
1959  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
1960  void EmitAutoVarInit(const AutoVarEmission &emission);
1961  void EmitAutoVarCleanups(const AutoVarEmission &emission);
1962  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1963                              QualType::DestructionKind dtorKind);
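
  // Illustrative sketch (not from this file): EmitAutoVarDecl is essentially
  // the composition of the three phases above:
  //
  //   AutoVarEmission emission = CGF.EmitAutoVarAlloca(D);
  //   CGF.EmitAutoVarInit(emission);
  //   CGF.EmitAutoVarCleanups(emission);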
1964
1965  void EmitStaticVarDecl(const VarDecl &D,
1966                         llvm::GlobalValue::LinkageTypes Linkage);
1967
1968  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1969  void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
1970
1971  /// protectFromPeepholes - Protect a value that we're intending to
1972  /// store to the side, but which will probably be used later, from
1973  /// aggressive peepholing optimizations that might delete it.
1974  ///
1975  /// Pass the result to unprotectFromPeepholes to declare that
1976  /// protection is no longer required.
1977  ///
1978  /// There's no particular reason why this shouldn't apply to
1979  /// l-values, it's just that no existing peepholes work on pointers.
1980  PeepholeProtection protectFromPeepholes(RValue rvalue);
1981  void unprotectFromPeepholes(PeepholeProtection protection);
1982
1983  //===--------------------------------------------------------------------===//
1984  //                             Statement Emission
1985  //===--------------------------------------------------------------------===//
1986
1987  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1988  void EmitStopPoint(const Stmt *S);
1989
1990  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1991  /// this function even if there is no current insertion point.
1992  ///
1993  /// This function may clear the current insertion point; callers should use
1994  /// EnsureInsertPoint if they wish to subsequently generate code without first
1995  /// calling EmitBlock, EmitBranch, or EmitStmt.
1996  void EmitStmt(const Stmt *S);
1997
1998  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
1999  /// necessarily require an insertion point or debug information; typically
2000  /// because the statement amounts to a jump or a container of other
2001  /// statements.
2002  ///
2003  /// \return True if the statement was handled.
2004  bool EmitSimpleStmt(const Stmt *S);
2005
2006  RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
2007                          AggValueSlot AVS = AggValueSlot::ignored());
2008
2009  /// EmitLabel - Emit the block for the given label. It is legal to call this
2010  /// function even if there is no current insertion point.
2011  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
2012
2013  void EmitLabelStmt(const LabelStmt &S);
2014  void EmitAttributedStmt(const AttributedStmt &S);
2015  void EmitGotoStmt(const GotoStmt &S);
2016  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
2017  void EmitIfStmt(const IfStmt &S);
2018  void EmitWhileStmt(const WhileStmt &S);
2019  void EmitDoStmt(const DoStmt &S);
2020  void EmitForStmt(const ForStmt &S);
2021  void EmitReturnStmt(const ReturnStmt &S);
2022  void EmitDeclStmt(const DeclStmt &S);
2023  void EmitBreakStmt(const BreakStmt &S);
2024  void EmitContinueStmt(const ContinueStmt &S);
2025  void EmitSwitchStmt(const SwitchStmt &S);
2026  void EmitDefaultStmt(const DefaultStmt &S);
2027  void EmitCaseStmt(const CaseStmt &S);
2028  void EmitCaseStmtRange(const CaseStmt &S);
2029  void EmitAsmStmt(const AsmStmt &S);
2030
2031  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
2032  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
2033  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
2034  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
2035  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
2036
2037  llvm::Constant *getUnwindResumeFn();
2038  llvm::Constant *getUnwindResumeOrRethrowFn();
2039  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2040  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2041
2042  void EmitCXXTryStmt(const CXXTryStmt &S);
2043  void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
2044
2045  //===--------------------------------------------------------------------===//
2046  //                         LValue Expression Emission
2047  //===--------------------------------------------------------------------===//
2048
2049  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
2050  RValue GetUndefRValue(QualType Ty);
2051
2052  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
2053  /// and issue an ErrorUnsupported style diagnostic (using the
2054  /// provided Name).
2055  RValue EmitUnsupportedRValue(const Expr *E,
2056                               const char *Name);
2057
2058  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
2059  /// an ErrorUnsupported style diagnostic (using the provided Name).
2060  LValue EmitUnsupportedLValue(const Expr *E,
2061                               const char *Name);
2062
2063  /// EmitLValue - Emit code to compute a designator that specifies the location
2064  /// of the expression.
2065  ///
2066  /// This can return one of two things: a simple address or a bitfield
2067  /// reference.  In either case, the LLVM Value* in the LValue structure is
2068  /// guaranteed to be an LLVM pointer type.
2069  ///
2070  /// If this returns a bitfield reference, nothing about the pointee type of
2071  /// the LLVM value is known: For example, it may not be a pointer to an
2072  /// integer.
2073  ///
2074  /// If this returns a normal address, and if the lvalue's C type is fixed
2075  /// size, this method guarantees that the returned pointer type will point to
2076  /// an LLVM type of the same size of the lvalue's type.  If the lvalue has a
2077  /// variable length type, this is not possible.
2078  ///
2079  LValue EmitLValue(const Expr *E);
2080
2081  /// \brief Same as EmitLValue but additionally we generate checking code to
2082  /// guard against undefined behavior.  This is only suitable when we know
2083  /// that the address will be used to access the object.
2084  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
2085
2086  /// EmitToMemory - Change a scalar value from its value
2087  /// representation to its in-memory representation.
2088  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
2089
2090  /// EmitFromMemory - Change a scalar value from its memory
2091  /// representation to its value representation.
2092  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
2093
2094  /// EmitLoadOfScalar - Load a scalar value from an address, taking
2095  /// care to appropriately convert from the memory representation to
2096  /// the LLVM value representation.
2097  llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
2098                                unsigned Alignment, QualType Ty,
2099                                llvm::MDNode *TBAAInfo = 0);
2100
2101  /// EmitLoadOfScalar - Load a scalar value from an address, taking
2102  /// care to appropriately convert from the memory representation to
2103  /// the LLVM value representation.  The l-value must be a simple
2104  /// l-value.
2105  llvm::Value *EmitLoadOfScalar(LValue lvalue);
2106
2107  /// EmitStoreOfScalar - Store a scalar value to an address, taking
2108  /// care to appropriately convert from the LLVM value representation to
2109  /// the memory representation.
2110  void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
2111                         bool Volatile, unsigned Alignment, QualType Ty,
2112                         llvm::MDNode *TBAAInfo = 0, bool isInit=false);
2113
2114  /// EmitStoreOfScalar - Store a scalar value to an address, taking
2115  /// care to appropriately convert from the LLVM value representation to
2116  /// the memory representation.  The l-value must be a simple
2117  /// l-value.  The isInit flag indicates whether this is an initialization.
2118  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
2119  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
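
  // Illustrative sketch (not from this file): copying a scalar from one
  // simple l-value to another, with the proper memory/value conversions:
  //
  //   llvm::Value *V = CGF.EmitLoadOfScalar(SrcLV);
  //   CGF.EmitStoreOfScalar(V, DstLV, /*isInit=*/false);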
2120
2121  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
2122  /// this method emits the address of the lvalue, then loads the result as an
2123  /// rvalue, returning the rvalue.
2124  RValue EmitLoadOfLValue(LValue V);
2125  RValue EmitLoadOfExtVectorElementLValue(LValue V);
2126  RValue EmitLoadOfBitfieldLValue(LValue LV);
2127
2128  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2129  /// lvalue, where both are guaranteed to have the same type, and that type
2130  /// is 'Ty'.
2131  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
2132  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
2133
2134  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as
2135  /// EmitStoreThroughLValue.
2136  ///
2137  /// \param Result [out] - If non-null, this will be set to a Value* for the
2138  /// bit-field contents after the store, appropriate for use as the result of
2139  /// an assignment to the bit-field.
2140  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2141                                      llvm::Value **Result=0);
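
  // Illustrative sketch (not from this file): storing into a bit-field and
  // recovering the bit-field's value after the store (e.g. for the result of
  // an assignment expression):
  //
  //   llvm::Value *Result = 0;
  //   CGF.EmitStoreThroughBitfieldLValue(Src, Dst, &Result);
  //   // Result now holds the bit-field contents after the store.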
2142
2143  /// Emit an l-value for an assignment (simple or compound) of complex type.
2144  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
2145  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
2146
2147  // Note: only available for agg return types
2148  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
2149  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
2150  // Note: only available for agg return types
2151  LValue EmitCallExprLValue(const CallExpr *E);
2152  // Note: only available for agg return types
2153  LValue EmitVAArgExprLValue(const VAArgExpr *E);
2154  LValue EmitDeclRefLValue(const DeclRefExpr *E);
2155  LValue EmitStringLiteralLValue(const StringLiteral *E);
2156  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
2157  LValue EmitPredefinedLValue(const PredefinedExpr *E);
2158  LValue EmitUnaryOpLValue(const UnaryOperator *E);
2159  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
2160  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
2161  LValue EmitMemberExpr(const MemberExpr *E);
2162  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
2163  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
2164  LValue EmitInitListLValue(const InitListExpr *E);
2165  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
2166  LValue EmitCastLValue(const CastExpr *E);
2167  LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
2168  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
2169  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
2170
2171  RValue EmitRValueForField(LValue LV, const FieldDecl *FD);
2172
2173  class ConstantEmission {
2174    llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
2175    ConstantEmission(llvm::Constant *C, bool isReference)
2176      : ValueAndIsReference(C, isReference) {}
2177  public:
2178    ConstantEmission() {}
2179    static ConstantEmission forReference(llvm::Constant *C) {
2180      return ConstantEmission(C, true);
2181    }
2182    static ConstantEmission forValue(llvm::Constant *C) {
2183      return ConstantEmission(C, false);
2184    }
2185
2186    operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
2187
2188    bool isReference() const { return ValueAndIsReference.getInt(); }
2189    LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
2190      assert(isReference());
2191      return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
2192                                            refExpr->getType());
2193    }
2194
2195    llvm::Constant *getValue() const {
2196      assert(!isReference());
2197      return ValueAndIsReference.getPointer();
2198    }
2199  };
2200
2201  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
2202
2203  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
2204                                AggValueSlot slot = AggValueSlot::ignored());
2205  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
2206
2207  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2208                              const ObjCIvarDecl *Ivar);
2209  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
2210
2211  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
2212  /// if the Field is a reference, this will return the address of the reference
2213  /// and not the address of the value stored in the reference.
2214  LValue EmitLValueForFieldInitialization(LValue Base,
2215                                          const FieldDecl* Field);
2216
2217  LValue EmitLValueForIvar(QualType ObjectTy,
2218                           llvm::Value* Base, const ObjCIvarDecl *Ivar,
2219                           unsigned CVRQualifiers);
2220
2221  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
2222  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
2223  LValue EmitLambdaLValue(const LambdaExpr *E);
2224  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
2225  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
2226
2227  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
2228  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
2229  LValue EmitStmtExprLValue(const StmtExpr *E);
2230  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
2231  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
2232  void   EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
2233
2234  //===--------------------------------------------------------------------===//
2235  //                         Scalar Expression Emission
2236  //===--------------------------------------------------------------------===//
2237
2238  /// EmitCall - Generate a call of the given function, expecting the given
2239  /// result type, and using the given argument list which specifies both the
2240  /// LLVM arguments and the types they were derived from.
2241  ///
2242  /// \param TargetDecl - If given, the decl of the function in a direct call;
2243  /// used to set attributes on the call (noreturn, etc.).
2244  RValue EmitCall(const CGFunctionInfo &FnInfo,
2245                  llvm::Value *Callee,
2246                  ReturnValueSlot ReturnValue,
2247                  const CallArgList &Args,
2248                  const Decl *TargetDecl = 0,
2249                  llvm::Instruction **callOrInvoke = 0);
2250
2251  RValue EmitCall(QualType FnType, llvm::Value *Callee,
2252                  ReturnValueSlot ReturnValue,
2253                  CallExpr::const_arg_iterator ArgBeg,
2254                  CallExpr::const_arg_iterator ArgEnd,
2255                  const Decl *TargetDecl = 0);
2256  RValue EmitCallExpr(const CallExpr *E,
2257                      ReturnValueSlot ReturnValue = ReturnValueSlot());
2258
2259  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2260                                  ArrayRef<llvm::Value *> Args,
2261                                  const Twine &Name = "");
2262  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2263                                  const Twine &Name = "");
2264
2265  llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
2266                                llvm::Type *Ty);
2267  llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
2268                                llvm::Value *This, llvm::Type *Ty);
2269  llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
2270                                         NestedNameSpecifier *Qual,
2271                                         llvm::Type *Ty);
2272
2273  llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
2274                                                   CXXDtorType Type,
2275                                                   const CXXRecordDecl *RD);
2276
2277  RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
2278                           SourceLocation CallLoc,
2279                           llvm::Value *Callee,
2280                           ReturnValueSlot ReturnValue,
2281                           llvm::Value *This,
2282                           llvm::Value *VTT,
2283                           CallExpr::const_arg_iterator ArgBeg,
2284                           CallExpr::const_arg_iterator ArgEnd);
2285  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
2286                               ReturnValueSlot ReturnValue);
2287  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
2288                                      ReturnValueSlot ReturnValue);
2289
2290  llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2291                                           const CXXMethodDecl *MD,
2292                                           llvm::Value *This);
2293  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
2294                                       const CXXMethodDecl *MD,
2295                                       ReturnValueSlot ReturnValue);
2296
2297  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
2298                                ReturnValueSlot ReturnValue);
2299
2300
2301  RValue EmitBuiltinExpr(const FunctionDecl *FD,
2302                         unsigned BuiltinID, const CallExpr *E);
2303
2304  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
2305
2306  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
2307  /// is unhandled by the current target.
2308  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2309
2310  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2311  llvm::Value *EmitNeonCall(llvm::Function *F,
2312                            SmallVectorImpl<llvm::Value*> &O,
2313                            const char *name,
2314                            unsigned shift = 0, bool rightshift = false);
2315  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
2316  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
2317                                   bool negateForRightShift);
2318
2319  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
2320  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2321  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2322
2323  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
2324  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
2325  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
2326  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
2327  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
2328  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
2329                                const ObjCMethodDecl *MethodWithObjects);
2330  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
2331  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
2332                             ReturnValueSlot Return = ReturnValueSlot());
2333
2334  /// Retrieves the default cleanup kind for an ARC cleanup.
2335  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
2336  CleanupKind getARCCleanupKind() {
2337    return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
2338             ? NormalAndEHCleanup : NormalCleanup;
2339  }
2340
2341  // ARC primitives.
2342  void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
2343  void EmitARCDestroyWeak(llvm::Value *addr);
2344  llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
2345  llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
2346  llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
2347                                bool ignored);
2348  void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
2349  void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
2350  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
2351  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
2352  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
2353                                  bool ignored);
2354  llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
2355                                      bool ignored);
2356  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
2357  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
2358  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
2359  void EmitARCDestroyStrong(llvm::Value *addr, bool precise);
2360  void EmitARCRelease(llvm::Value *value, bool precise);
2361  llvm::Value *EmitARCAutorelease(llvm::Value *value);
2362  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
2363  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
2364  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
2365
2366  std::pair<LValue,llvm::Value*>
2367  EmitARCStoreAutoreleasing(const BinaryOperator *e);
2368  std::pair<LValue,llvm::Value*>
2369  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
2370
2371  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
2372
2373  llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
2374  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
2375  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
2376
2377  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
2378  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
2379  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
2380
2381  static Destroyer destroyARCStrongImprecise;
2382  static Destroyer destroyARCStrongPrecise;
2383  static Destroyer destroyARCWeak;
2384
2385  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
2386  llvm::Value *EmitObjCAutoreleasePoolPush();
2387  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
2388  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
2389  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
2390
2391  /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
2392  /// expression. Will emit a temporary variable if E is not an LValue.
2393  RValue EmitReferenceBindingToExpr(const Expr* E,
2394                                    const NamedDecl *InitializedDecl);
2395
2396  //===--------------------------------------------------------------------===//
2397  //                           Expression Emission
2398  //===--------------------------------------------------------------------===//
2399
2400  // Expressions are broken into three classes: scalar, complex, aggregate.
2401
2402  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
2403  /// scalar type, returning the result.
2404  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
2405
2406  /// EmitScalarConversion - Emit a conversion from the specified type to the
2407  /// specified destination type, both of which are LLVM scalar types.
2408  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
2409                                    QualType DstTy);
2410
2411  /// EmitComplexToScalarConversion - Emit a conversion from the specified
2412  /// complex type to the specified destination type, where the destination type
2413  /// is an LLVM scalar type.
2414  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
2415                                             QualType DstTy);
2416
2417
2418  /// EmitAggExpr - Emit the computation of the specified expression
2419  /// of aggregate type.  The result is computed into the given slot,
2420  /// which may be null to indicate that the value is not needed.
2421  void EmitAggExpr(const Expr *E, AggValueSlot AS);
2422
2423  /// EmitAggExprToLValue - Emit the computation of the specified expression of
2424  /// aggregate type into a temporary LValue.
2425  LValue EmitAggExprToLValue(const Expr *E);
2426
2427  /// EmitGCMemmoveCollectable - Emit special API for structs with object
2428  /// pointers.
2429  void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
2430                                QualType Ty);
2431
2432  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2433  /// make sure it survives garbage collection until this point.
2434  void EmitExtendGCLifetime(llvm::Value *object);
2435
2436  /// EmitComplexExpr - Emit the computation of the specified expression of
2437  /// complex type, returning the result.
2438  ComplexPairTy EmitComplexExpr(const Expr *E,
2439                                bool IgnoreReal = false,
2440                                bool IgnoreImag = false);
2441
2442  /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
2443  /// of complex type, storing into the specified Value*.
2444  void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
2445                               bool DestIsVolatile);
2446
2447  /// StoreComplexToAddr - Store a complex number into the specified address.
2448  void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
2449                          bool DestIsVolatile);
2450  /// LoadComplexFromAddr - Load a complex number from the specified address.
2451  ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
2452
2453  /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
2454  /// a static local variable.
2455  llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
2456                                            const char *Separator,
2457                                       llvm::GlobalValue::LinkageTypes Linkage);
2458
2459  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
2460  /// global variable that has already been created for it.  If the initializer
2461  /// has a different type than GV does, this may free GV and return a different
2462  /// one.  Otherwise it just returns GV.
2463  llvm::GlobalVariable *
2464  AddInitializerToStaticVarDecl(const VarDecl &D,
2465                                llvm::GlobalVariable *GV);
2466
2467
2468  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
2469  /// variable with global storage.
2470  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
2471                                bool PerformInit);
2472
2473  /// Call atexit() with a function that passes the given argument to
2474  /// the given function.
2475  void registerGlobalDtorWithAtExit(llvm::Constant *fn, llvm::Constant *addr);
2476
2477  /// Emit code in this function to perform a guarded variable
2478  /// initialization.  Guarded initializations are used when it's not
2479  /// possible to prove that an initialization will be done exactly
2480  /// once, e.g. with a static local variable or a static data member
2481  /// of a class template.
2482  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
2483                          bool PerformInit);
2484
2485  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
2486  /// variables.
2487  void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
2488                                 llvm::Constant **Decls,
2489                                 unsigned NumDecls);
2490
2491  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
2492  /// variables.
2493  void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
2494                                  const std::vector<std::pair<llvm::WeakVH,
2495                                  llvm::Constant*> > &DtorsAndObjects);
2496
2497  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
2498                                        const VarDecl *D,
2499                                        llvm::GlobalVariable *Addr,
2500                                        bool PerformInit);
2501
2502  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
2503
2504  void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
2505                                  const Expr *Exp);
2506
2507  void enterFullExpression(const ExprWithCleanups *E) {
2508    if (E->getNumObjects() == 0) return;
2509    enterNonTrivialFullExpression(E);
2510  }
2511  void enterNonTrivialFullExpression(const ExprWithCleanups *E);
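
  // Illustrative sketch (not from this file): a typical way to emit an
  // ExprWithCleanups named 'cleanups', assuming the RunCleanupsScope helper
  // declared elsewhere in this class:
  //
  //   CGF.enterFullExpression(cleanups);
  //   CodeGenFunction::RunCleanupsScope Scope(CGF);
  //   RValue RV = CGF.EmitAnyExpr(cleanups->getSubExpr());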
2512
2513  void EmitCXXThrowExpr(const CXXThrowExpr *E);
2514
2515  void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
2516
2517  RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
2518
2519  //===--------------------------------------------------------------------===//
2520  //                         Annotations Emission
2521  //===--------------------------------------------------------------------===//
2522
2523  /// Emit an annotation call (intrinsic or builtin).
2524  llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
2525                                  llvm::Value *AnnotatedVal,
2526                                  llvm::StringRef AnnotationStr,
2527                                  SourceLocation Location);
2528
2529  /// Emit local annotations for the local variable V, declared by D.
2530  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
2531
2532  /// Emit field annotations for the given field & value. Returns the
2533  /// annotation result.
2534  llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
2535
2536  //===--------------------------------------------------------------------===//
2537  //                             Internal Helpers
2538  //===--------------------------------------------------------------------===//
2539
2540  /// ContainsLabel - Return true if the statement contains a label in it.  If
2541  /// this statement is not executed normally (e.g. it is dead code), the
2542  /// absence of a label means the code can simply be removed.
2543  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
2544
2545  /// containsBreak - Return true if the statement contains a break out of it.
2546  /// A break nested inside a switch or loop that is itself contained in the
2547  /// statement does not count, since such a break cannot escape the statement.
2548  static bool containsBreak(const Stmt *S);
2549
2550  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2551  /// to a constant, or if it does but contains a label, return false.  If it
2552  /// constant folds, return true and set the boolean result in Result.
2553  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
2554
2555  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2556  /// to a constant, or if it does but contains a label, return false.  If it
2557  /// constant folds, return true and set the folded value in Result.
2558  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result);
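  // For example (illustrative), a condition such as
  //
  //   if (sizeof(void*) == 8) { /* 64-bit path */ } else { /* 32-bit path */ }
  //
  // folds to a constant, so the caller can skip emitting the never-taken arm
  // entirely, provided ContainsLabel confirms the skipped code has no label
  // that could be branched to from elsewhere.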
2559
2560  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
2561  /// if statement) to the specified blocks.  Depending on the condition, this
2562  /// may simplify the generated code, e.g. by short-circuiting && and ||.
2563  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
2564                            llvm::BasicBlock *FalseBlock);
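  // Illustrative sketch of the simplification mentioned above: for
  //
  //   if (a && b) f(); else g();
  //
  // the branch on 'a' can jump straight to the false block when 'a' is false,
  // and only the true edge goes on to evaluate 'b'; no i1 value for 'a && b'
  // needs to be materialized first.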
2565
2566  /// \brief Emit a description of a type in a format suitable for passing to
2567  /// a runtime sanitizer handler.
2568  llvm::Constant *EmitCheckTypeDescriptor(QualType T);
2569
2570  /// \brief Convert a value into a format suitable for passing to a runtime
2571  /// sanitizer handler.
2572  llvm::Value *EmitCheckValue(llvm::Value *V);
2573
2574  /// \brief Emit a description of a source location in a format suitable for
2575  /// passing to a runtime sanitizer handler.
2576  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
2577
2578  /// \brief Specify under what conditions this check can be recovered
2579  enum CheckRecoverableKind {
2580    /// Always terminate program execution if this check fails
2581    CRK_Unrecoverable,
2582    /// Check supports recovering, allows user to specify which
2583    CRK_Recoverable,
2584    /// Runtime conditionally aborts, always need to support recovery.
2585    CRK_AlwaysRecoverable
2586  };
2587
2588  /// \brief Create a basic block that will call a handler function in a
2589  /// sanitizer runtime with the provided arguments, and create a conditional
2590  /// branch to it.
2591  void EmitCheck(llvm::Value *Checked, StringRef CheckName,
2592                 llvm::ArrayRef<llvm::Constant *> StaticArgs,
2593                 llvm::ArrayRef<llvm::Value *> DynamicArgs,
2594                 CheckRecoverableKind Recoverable);
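  // Rough shape of the emitted code (illustrative sketch only; the handler
  // symbol depends on CheckName and on the sanitizer runtime):
  //
  //   br i1 %checked, label %cont, label %handler
  // handler:
  //   call void @__ubsan_handle_<CheckName>(<StaticArgs>, <DynamicArgs>)
  //   ; unrecoverable checks end here, recoverable ones branch back to %cont
  // cont: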
2595
2596  /// \brief Create a basic block that will call the trap intrinsic, and emit a
2597  /// conditional branch to it, for the -ftrapv checks.
2598  void EmitTrapvCheck(llvm::Value *Checked);
2599
2600  /// EmitCallArg - Emit a single call argument.
2601  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
2602
2603  /// EmitDelegateCallArg - We are performing a delegate call; that
2604  /// is, the current function is delegating to another one.  Produce
2605  /// a r-value suitable for passing the given parameter.
2606  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
2607
2608  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
2609  /// point operation, expressed as the maximum relative error in ulp.
2610  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
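  // For instance (illustrative; 'Div' names a hypothetical fdiv and the IR
  // below is only a sketch), SetFPAccuracy(Div, 2.5f) attaches 'fpmath'
  // metadata to the instruction, which later passes may use to choose a
  // faster, less accurate lowering:
  //
  //   %quot = fdiv float %a, %b, !fpmath !0
  //   !0 = metadata !{ float 2.500000e+00 }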
2611
2612private:
2613  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
2614  void EmitReturnOfRValue(RValue RV, QualType Ty);
2615
2616  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
2617  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
2618  ///
2619  /// \param AI - The first function argument of the expansion.
2620  /// \return The argument following the last expanded function
2621  /// argument.
2622  llvm::Function::arg_iterator
2623  ExpandTypeFromArgs(QualType Ty, LValue Dst,
2624                     llvm::Function::arg_iterator AI);
2625
2626  /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
2627  /// Ty, into individual arguments on the provided vector \arg Args. See
2628  /// ABIArgInfo::Expand.
2629  void ExpandTypeToArgs(QualType Ty, RValue Src,
2630                        SmallVector<llvm::Value*, 16> &Args,
2631                        llvm::FunctionType *IRFuncTy);
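  // Under ABIArgInfo::Expand (illustrative example), an aggregate such as
  //
  //   struct Pair { int a; float b; };
  //
  // is passed as two separate IR arguments (i32, float) rather than as one
  // aggregate value; ExpandTypeFromArgs performs the inverse reconstruction
  // on the callee side.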
2632
2633  llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2634                            const Expr *InputExpr, std::string &ConstraintStr);
2635
2636  llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
2637                                  LValue InputValue, QualType InputType,
2638                                  std::string &ConstraintStr);
2639
2640  /// EmitCallArgs - Emit call arguments for a function.
2641  /// The CallArgTypeInfo parameter is used for iterating over the known
2642  /// argument types of the function being called.
2643  template<typename T>
2644  void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
2645                    CallExpr::const_arg_iterator ArgBeg,
2646                    CallExpr::const_arg_iterator ArgEnd) {
2647    CallExpr::const_arg_iterator Arg = ArgBeg;
2648
2649    // First, use the argument types that the type info knows about
2650    if (CallArgTypeInfo) {
2651      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
2652           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
2653        assert(Arg != ArgEnd && "Running over edge of argument list!");
2654        QualType ArgType = *I;
2655#ifndef NDEBUG
2656        QualType ActualArgType = Arg->getType();
2657        if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
2658          QualType ActualBaseType =
2659            ActualArgType->getAs<PointerType>()->getPointeeType();
2660          QualType ArgBaseType =
2661            ArgType->getAs<PointerType>()->getPointeeType();
2662          if (ArgBaseType->isVariableArrayType()) {
2663            if (const VariableArrayType *VAT =
2664                getContext().getAsVariableArrayType(ActualBaseType)) {
2665              if (!VAT->getSizeExpr())
2666                ActualArgType = ArgType;
2667            }
2668          }
2669        }
2670        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
2671               getTypePtr() ==
2672               getContext().getCanonicalType(ActualArgType).getTypePtr() &&
2673               "type mismatch in call argument!");
2674#endif
2675        EmitCallArg(Args, *Arg, ArgType);
2676      }
2677
2678      // Either we've emitted all the call args, or we have a call to a
2679      // variadic function.
2680      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
2681             "Extra arguments in non-variadic function!");
2682
2683    }
2684
2685    // If we still have any arguments, emit them using their own types.
2686    for (; Arg != ArgEnd; ++Arg)
2687      EmitCallArg(Args, *Arg, Arg->getType());
2688  }
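  // Typical use (illustrative; 'CE' and 'FPT' are hypothetical locals): the
  // type info is usually a FunctionProtoType, whose declared parameter types
  // drive the first loop, while any variadic tail falls through to the second.
  //
  //   const FunctionProtoType *FPT = ...;  // callee prototype
  //   CallArgList Args;
  //   EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());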
2689
2690  const TargetCodeGenInfo &getTargetHooks() const {
2691    return CGM.getTargetCodeGenInfo();
2692  }
2693
2694  void EmitDeclMetadata();
2695
2696  CodeGenModule::ByrefHelpers *
2697  buildByrefHelpers(llvm::StructType &byrefType,
2698                    const AutoVarEmission &emission);
2699
2700  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
2701
2702  /// EmitPointerWithAlignment - Given an expression with a pointer type, emit
2703  /// the value and compute our best estimate of the alignment of the pointee.
2704  std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
2705};
2706
2707/// Helper class with most of the code for saving a value for a
2708/// conditional expression cleanup.
2709struct DominatingLLVMValue {
2710  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
2711
2712  /// Answer whether the given value needs extra work to be saved.
2713  static bool needsSaving(llvm::Value *value) {
2714    // If it's not an instruction, we don't need to save.
2715    if (!isa<llvm::Instruction>(value)) return false;
2716
2717    // If it's an instruction in the entry block, we don't need to save.
2718    llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
2719    return (block != &block->getParent()->getEntryBlock());
2720  }
2721
2722  /// Try to save the given value.
2723  static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
2724    if (!needsSaving(value)) return saved_type(value, false);
2725
2726    // Otherwise we need an alloca.
2727    llvm::Value *alloca =
2728      CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
2729    CGF.Builder.CreateStore(value, alloca);
2730
2731    return saved_type(alloca, true);
2732  }
2733
2734  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
2735    if (!value.getInt()) return value.getPointer();
2736    return CGF.Builder.CreateLoad(value.getPointer());
2737  }
2738};
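// Illustrative note (an assumption about intent, not from the original
// source): in an expression like 'cond ? make() : fallback()', a value
// produced inside one arm does not dominate cleanup code emitted after the
// whole conditional, so save() spills it to a temporary alloca and restore()
// reloads it when the cleanup actually runs.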
2739
2740/// A partial specialization of DominatingPointer for pointer types whose
2741/// pointees are llvm::Values and so might be llvm::Instructions.
2742template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
2743  typedef T *type;
2744  static type restore(CodeGenFunction &CGF, saved_type value) {
2745    return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
2746  }
2747};
2748
2749/// A specialization of DominatingValue for RValue.
2750template <> struct DominatingValue<RValue> {
2751  typedef RValue type;
2752  class saved_type {
2753    enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
2754                AggregateAddress, ComplexAddress };
2755
2756    llvm::Value *Value;
2757    Kind K;
2758    saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
2759
2760  public:
2761    static bool needsSaving(RValue value);
2762    static saved_type save(CodeGenFunction &CGF, RValue value);
2763    RValue restore(CodeGenFunction &CGF);
2764
2765    // implementations in CGExprCXX.cpp
2766  };
2767
2768  static bool needsSaving(type value) {
2769    return saved_type::needsSaving(value);
2770  }
2771  static saved_type save(CodeGenFunction &CGF, type value) {
2772    return saved_type::save(CGF, value);
2773  }
2774  static type restore(CodeGenFunction &CGF, saved_type value) {
2775    return value.restore(CGF);
2776  }
2777};
2778
2779}  // end namespace CodeGen
2780}  // end namespace clang
2781
2782#endif
2783