CodeGenFunction.h revision 3ac83d69c61238cd0d38e90fcdd03390530ab2fb
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This is the internal per-function state used for llvm translation.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15#define CLANG_CODEGEN_CODEGENFUNCTION_H
16
17#include "CGBuilder.h"
18#include "CGDebugInfo.h"
19#include "CGValue.h"
20#include "CodeGenModule.h"
21#include "clang/AST/CharUnits.h"
22#include "clang/AST/ExprCXX.h"
23#include "clang/AST/ExprObjC.h"
24#include "clang/AST/Type.h"
25#include "clang/Basic/ABI.h"
26#include "clang/Basic/TargetInfo.h"
27#include "clang/Frontend/CodeGenOptions.h"
28#include "llvm/ADT/ArrayRef.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/Support/Debug.h"
32#include "llvm/Support/ValueHandle.h"
33
34namespace llvm {
35  class BasicBlock;
36  class LLVMContext;
37  class MDNode;
38  class Module;
39  class SwitchInst;
40  class Twine;
41  class Value;
42  class CallSite;
43}
44
45namespace clang {
46  class ASTContext;
47  class BlockDecl;
48  class CXXDestructorDecl;
49  class CXXForRangeStmt;
50  class CXXTryStmt;
51  class Decl;
52  class LabelDecl;
53  class EnumConstantDecl;
54  class FunctionDecl;
55  class FunctionProtoType;
56  class LabelStmt;
57  class ObjCContainerDecl;
58  class ObjCInterfaceDecl;
59  class ObjCIvarDecl;
60  class ObjCMethodDecl;
61  class ObjCImplementationDecl;
62  class ObjCPropertyImplDecl;
63  class TargetInfo;
64  class TargetCodeGenInfo;
65  class VarDecl;
66  class ObjCForCollectionStmt;
67  class ObjCAtTryStmt;
68  class ObjCAtThrowStmt;
69  class ObjCAtSynchronizedStmt;
70  class ObjCAutoreleasePoolStmt;
71
72namespace CodeGen {
73  class CodeGenTypes;
74  class CGFunctionInfo;
75  class CGRecordLayout;
76  class CGBlockInfo;
77  class CGCXXABI;
78  class BlockFlags;
79  class BlockFieldFlags;
80
81/// A branch fixup.  These are required when emitting a goto to a
82/// label which hasn't been emitted yet.  The goto is optimistically
83/// emitted as a branch to the basic block for the label, and (if it
84/// occurs in a scope with non-trivial cleanups) a fixup is added to
85/// the innermost cleanup.  When a (normal) cleanup is popped, any
86/// unresolved fixups in that scope are threaded through the cleanup.
87struct BranchFixup {
88  /// The block containing the terminator which needs to be modified
89  /// into a switch if this fixup is resolved into the current scope.
90  /// If null, InitialBranch points directly to the destination.
91  llvm::BasicBlock *OptimisticBranchBlock;
92
93  /// The ultimate destination of the branch.
94  ///
95  /// This can be set to null to indicate that this fixup was
96  /// successfully resolved.
97  llvm::BasicBlock *Destination;
98
99  /// The destination index value.
100  unsigned DestinationIndex;
101
102  /// The initial branch of the fixup.
103  llvm::BranchInst *InitialBranch;
104};
105
106template <class T> struct InvariantValue {
107  typedef T type;
108  typedef T saved_type;
109  static bool needsSaving(type value) { return false; }
110  static saved_type save(CodeGenFunction &CGF, type value) { return value; }
111  static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
112};
113
114/// A metaprogramming class for ensuring that a value will dominate an
115/// arbitrary position in a function.
116template <class T> struct DominatingValue : InvariantValue<T> {};
117
118template <class T, bool mightBeInstruction =
119            llvm::is_base_of<llvm::Value, T>::value &&
120            !llvm::is_base_of<llvm::Constant, T>::value &&
121            !llvm::is_base_of<llvm::BasicBlock, T>::value>
122struct DominatingPointer;
123template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
124// template <class T> struct DominatingPointer<T,true> at end of file
125
126template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
127
128enum CleanupKind {
129  EHCleanup = 0x1,
130  NormalCleanup = 0x2,
131  NormalAndEHCleanup = EHCleanup | NormalCleanup,
132
133  InactiveCleanup = 0x4,
134  InactiveEHCleanup = EHCleanup | InactiveCleanup,
135  InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
136  InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
137};
138
139/// A stack of scopes which respond to exceptions, including cleanups
140/// and catch blocks.
141class EHScopeStack {
142public:
143  /// A saved depth on the scope stack.  This is necessary because
144  /// pushing scopes onto the stack invalidates iterators.
145  class stable_iterator {
146    friend class EHScopeStack;
147
148    /// Offset from StartOfData to EndOfBuffer.
149    ptrdiff_t Size;
150
151    stable_iterator(ptrdiff_t Size) : Size(Size) {}
152
153  public:
154    static stable_iterator invalid() { return stable_iterator(-1); }
155    stable_iterator() : Size(-1) {}
156
157    bool isValid() const { return Size >= 0; }
158
159    /// Returns true if this scope encloses I.
160    /// Returns false if I is invalid.
161    /// This scope must be valid.
162    bool encloses(stable_iterator I) const { return Size <= I.Size; }
163
164    /// Returns true if this scope strictly encloses I: that is,
165    /// if it encloses I and is not I.
166    /// Returns false if I is invalid.
167    /// This scope must be valid.
168    bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
169
170    friend bool operator==(stable_iterator A, stable_iterator B) {
171      return A.Size == B.Size;
172    }
173    friend bool operator!=(stable_iterator A, stable_iterator B) {
174      return A.Size != B.Size;
175    }
176  };
177
178  /// Information for lazily generating a cleanup.  Subclasses must be
179  /// POD-like: cleanups will not be destructed, and they will be
180  /// allocated on the cleanup stack and freely copied and moved
181  /// around.
182  ///
183  /// Cleanup implementations should generally be declared in an
184  /// anonymous namespace.
185  class Cleanup {
186    // Anchor the construction vtable.
187    virtual void anchor();
188  public:
189    /// Generation flags.
190    class Flags {
191      enum {
192        F_IsForEH             = 0x1,
193        F_IsNormalCleanupKind = 0x2,
194        F_IsEHCleanupKind     = 0x4
195      };
196      unsigned flags;
197
198    public:
199      Flags() : flags(0) {}
200
201      /// isForEHCleanup - true if the current emission is for an EH cleanup.
202      bool isForEHCleanup() const { return flags & F_IsForEH; }
203      bool isForNormalCleanup() const { return !isForEHCleanup(); }
204      void setIsForEHCleanup() { flags |= F_IsForEH; }
205
206      bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
207      void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
208
209      /// isEHCleanupKind - true if the cleanup was pushed as an EH
210      /// cleanup.
211      bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
212      void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
213    };
214
215    // Provide a virtual destructor to suppress a very common warning
216    // that unfortunately cannot be suppressed without this.  Cleanups
217    // should not rely on this destructor ever being called.
218    virtual ~Cleanup() {}
219
220    /// Emit the cleanup.  For normal cleanups, this is run in the
221    /// same EH context as when the cleanup was pushed, i.e. the
222    /// immediately-enclosing context of the cleanup scope.  For
223    /// EH cleanups, this is run in a terminate context.
224    ///
225    /// \param flags cleanup kind.
226    virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
227  };
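  // Illustrative sketch (not part of this header): a concrete cleanup is
  // typically a small POD-like struct declared in an anonymous namespace of
  // a .cpp file.  The helper named EmitReleaseCall below is hypothetical.
  //
  //   namespace {
  //     struct CallRelease : EHScopeStack::Cleanup {
  //       llvm::Value *Ptr;                        // saved member data only
  //       CallRelease(llvm::Value *ptr) : Ptr(ptr) {}
  //       void Emit(CodeGenFunction &CGF, Flags flags) {
  //         CGF.EmitReleaseCall(Ptr);              // hypothetical helper
  //       }
  //     };
  //   }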
228
229  /// ConditionalCleanupN stores the saved form of its N parameters,
230  /// then restores them and performs the cleanup.
231  template <class T, class A0>
232  class ConditionalCleanup1 : public Cleanup {
233    typedef typename DominatingValue<A0>::saved_type A0_saved;
234    A0_saved a0_saved;
235
236    void Emit(CodeGenFunction &CGF, Flags flags) {
237      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
238      T(a0).Emit(CGF, flags);
239    }
240
241  public:
242    ConditionalCleanup1(A0_saved a0)
243      : a0_saved(a0) {}
244  };
245
246  template <class T, class A0, class A1>
247  class ConditionalCleanup2 : public Cleanup {
248    typedef typename DominatingValue<A0>::saved_type A0_saved;
249    typedef typename DominatingValue<A1>::saved_type A1_saved;
250    A0_saved a0_saved;
251    A1_saved a1_saved;
252
253    void Emit(CodeGenFunction &CGF, Flags flags) {
254      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
255      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
256      T(a0, a1).Emit(CGF, flags);
257    }
258
259  public:
260    ConditionalCleanup2(A0_saved a0, A1_saved a1)
261      : a0_saved(a0), a1_saved(a1) {}
262  };
263
264  template <class T, class A0, class A1, class A2>
265  class ConditionalCleanup3 : public Cleanup {
266    typedef typename DominatingValue<A0>::saved_type A0_saved;
267    typedef typename DominatingValue<A1>::saved_type A1_saved;
268    typedef typename DominatingValue<A2>::saved_type A2_saved;
269    A0_saved a0_saved;
270    A1_saved a1_saved;
271    A2_saved a2_saved;
272
273    void Emit(CodeGenFunction &CGF, Flags flags) {
274      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
275      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
276      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
277      T(a0, a1, a2).Emit(CGF, flags);
278    }
279
280  public:
281    ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
282      : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
283  };
284
285  template <class T, class A0, class A1, class A2, class A3>
286  class ConditionalCleanup4 : public Cleanup {
287    typedef typename DominatingValue<A0>::saved_type A0_saved;
288    typedef typename DominatingValue<A1>::saved_type A1_saved;
289    typedef typename DominatingValue<A2>::saved_type A2_saved;
290    typedef typename DominatingValue<A3>::saved_type A3_saved;
291    A0_saved a0_saved;
292    A1_saved a1_saved;
293    A2_saved a2_saved;
294    A3_saved a3_saved;
295
296    void Emit(CodeGenFunction &CGF, Flags flags) {
297      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
298      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
299      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
300      A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
301      T(a0, a1, a2, a3).Emit(CGF, flags);
302    }
303
304  public:
305    ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
306      : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
307  };
308
309private:
310  // The implementation for this class is in CGException.h and
311  // CGException.cpp; the definition is here because it's used as a
312  // member of CodeGenFunction.
313
314  /// The start of the scope-stack buffer, i.e. the allocated pointer
315  /// for the buffer.  All of these pointers are either simultaneously
316  /// null or simultaneously valid.
317  char *StartOfBuffer;
318
319  /// The end of the buffer.
320  char *EndOfBuffer;
321
322  /// The first valid entry in the buffer.
323  char *StartOfData;
324
325  /// The innermost normal cleanup on the stack.
326  stable_iterator InnermostNormalCleanup;
327
328  /// The innermost EH scope on the stack.
329  stable_iterator InnermostEHScope;
330
331  /// The current set of branch fixups.  A branch fixup is a jump to
332  /// an as-yet unemitted label, i.e. a label for which we don't yet
333  /// know the EH stack depth.  Whenever we pop a cleanup, we have
334  /// to thread all the current branch fixups through it.
335  ///
336  /// Fixups are recorded as the Use of the respective branch or
337  /// switch statement.  The use points to the final destination.
338  /// When popping out of a cleanup, these uses are threaded through
339  /// the cleanup and adjusted to point to the new cleanup.
340  ///
341  /// Note that branches are allowed to jump into protected scopes
342  /// in certain situations;  e.g. the following code is legal:
343  ///     struct A { ~A(); }; // trivial ctor, non-trivial dtor
344  ///     goto foo;
345  ///     A a;
346  ///    foo:
347  ///     bar();
348  SmallVector<BranchFixup, 8> BranchFixups;
349
350  char *allocate(size_t Size);
351
352  void *pushCleanup(CleanupKind K, size_t DataSize);
353
354public:
355  EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
356                   InnermostNormalCleanup(stable_end()),
357                   InnermostEHScope(stable_end()) {}
358  ~EHScopeStack() { delete[] StartOfBuffer; }
359
360  // Variadic templates would make this not terrible.
361
362  /// Push a lazily-created cleanup on the stack.
363  template <class T>
364  void pushCleanup(CleanupKind Kind) {
365    void *Buffer = pushCleanup(Kind, sizeof(T));
366    Cleanup *Obj = new(Buffer) T();
367    (void) Obj;
368  }
369
370  /// Push a lazily-created cleanup on the stack.
371  template <class T, class A0>
372  void pushCleanup(CleanupKind Kind, A0 a0) {
373    void *Buffer = pushCleanup(Kind, sizeof(T));
374    Cleanup *Obj = new(Buffer) T(a0);
375    (void) Obj;
376  }
377
378  /// Push a lazily-created cleanup on the stack.
379  template <class T, class A0, class A1>
380  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
381    void *Buffer = pushCleanup(Kind, sizeof(T));
382    Cleanup *Obj = new(Buffer) T(a0, a1);
383    (void) Obj;
384  }
385
386  /// Push a lazily-created cleanup on the stack.
387  template <class T, class A0, class A1, class A2>
388  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
389    void *Buffer = pushCleanup(Kind, sizeof(T));
390    Cleanup *Obj = new(Buffer) T(a0, a1, a2);
391    (void) Obj;
392  }
393
394  /// Push a lazily-created cleanup on the stack.
395  template <class T, class A0, class A1, class A2, class A3>
396  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
397    void *Buffer = pushCleanup(Kind, sizeof(T));
398    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
399    (void) Obj;
400  }
401
402  /// Push a lazily-created cleanup on the stack.
403  template <class T, class A0, class A1, class A2, class A3, class A4>
404  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
405    void *Buffer = pushCleanup(Kind, sizeof(T));
406    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
407    (void) Obj;
408  }
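  // Usage sketch (illustrative): a cleanup type such as the CallRelease
  // example above is pushed with its kind and constructor arguments:
  //
  //   CGF.EHStack.pushCleanup<CallRelease>(NormalAndEHCleanup, ptr);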
409
410  // Feel free to add more variants of the following:
411
412  /// Push a cleanup with non-constant storage requirements on the
413  /// stack.  The cleanup type must provide an additional static method:
414  ///   static size_t getExtraSize(size_t);
415  /// The argument to this method will be the value N, which will also
416  /// be passed as the first argument to the constructor.
417  ///
418  /// The data stored in the extra storage must obey the same
419  /// restrictions as normal cleanup member data.
420  ///
421  /// The pointer returned from this method is valid until the cleanup
422  /// stack is modified.
423  template <class T, class A0, class A1, class A2>
424  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
425    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
426    return new (Buffer) T(N, a0, a1, a2);
427  }
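  // Illustrative sketch of the extra-storage contract: a hypothetical cleanup
  // type DestroyFields that keeps a trailing array of N pointers would declare
  //
  //   static size_t getExtraSize(size_t N) { return N * sizeof(llvm::Value*); }
  //
  // and be pushed as
  //
  //   CGF.EHStack.pushCleanupWithExtra<DestroyFields>(NormalCleanup, N,
  //                                                   a0, a1, a2);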
428
429  /// Pops a cleanup scope off the stack.  This is private to CGCleanup.cpp.
430  void popCleanup();
431
432  /// Push a set of catch handlers on the stack.  The catch is
433  /// uninitialized and will need to have the given number of handlers
434  /// set on it.
435  class EHCatchScope *pushCatch(unsigned NumHandlers);
436
437  /// Pops a catch scope off the stack.  This is private to CGException.cpp.
438  void popCatch();
439
440  /// Push an exception filter on the stack.
441  class EHFilterScope *pushFilter(unsigned NumFilters);
442
443  /// Pops an exception filter off the stack.
444  void popFilter();
445
446  /// Push a terminate handler on the stack.
447  void pushTerminate();
448
449  /// Pops a terminate handler off the stack.
450  void popTerminate();
451
452  /// Determines whether the exception-scopes stack is empty.
453  bool empty() const { return StartOfData == EndOfBuffer; }
454
455  bool requiresLandingPad() const {
456    return InnermostEHScope != stable_end();
457  }
458
459  /// Determines whether there are any normal cleanups on the stack.
460  bool hasNormalCleanups() const {
461    return InnermostNormalCleanup != stable_end();
462  }
463
464  /// Returns the innermost normal cleanup on the stack, or
465  /// stable_end() if there are no normal cleanups.
466  stable_iterator getInnermostNormalCleanup() const {
467    return InnermostNormalCleanup;
468  }
469  stable_iterator getInnermostActiveNormalCleanup() const;
470
471  stable_iterator getInnermostEHScope() const {
472    return InnermostEHScope;
473  }
474
475  stable_iterator getInnermostActiveEHScope() const;
476
477  /// An unstable reference to a scope-stack depth.  Invalidated by
478  /// pushes but not pops.
479  class iterator;
480
481  /// Returns an iterator pointing to the innermost EH scope.
482  iterator begin() const;
483
484  /// Returns an iterator pointing to the outermost EH scope.
485  iterator end() const;
486
487  /// Create a stable reference to the top of the EH stack.  The
488  /// returned reference is valid until that scope is popped off the
489  /// stack.
490  stable_iterator stable_begin() const {
491    return stable_iterator(EndOfBuffer - StartOfData);
492  }
493
494  /// Create a stable reference to the bottom of the EH stack.
495  static stable_iterator stable_end() {
496    return stable_iterator(0);
497  }
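  // Usage sketch (illustrative): callers commonly snapshot the current depth
  // before emitting a scope and later pop any cleanups pushed since then:
  //
  //   EHScopeStack::stable_iterator Depth = CGF.EHStack.stable_begin();
  //   // ... emit code that may push cleanups ...
  //   CGF.PopCleanupBlocks(Depth);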
498
499  /// Translates an iterator into a stable_iterator.
500  stable_iterator stabilize(iterator it) const;
501
502  /// Turn a stable reference to a scope depth into an unstable pointer
503  /// to the EH stack.
504  iterator find(stable_iterator save) const;
505
506  /// Removes the cleanup pointed to by the given stable_iterator.
507  void removeCleanup(stable_iterator save);
508
509  /// Add a branch fixup to the current cleanup scope.
510  BranchFixup &addBranchFixup() {
511    assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
512    BranchFixups.push_back(BranchFixup());
513    return BranchFixups.back();
514  }
515
516  unsigned getNumBranchFixups() const { return BranchFixups.size(); }
517  BranchFixup &getBranchFixup(unsigned I) {
518    assert(I < getNumBranchFixups());
519    return BranchFixups[I];
520  }
521
522  /// Pops lazily-removed fixups from the end of the list.  This
523  /// should only be called by procedures which have just popped a
524  /// cleanup or resolved one or more fixups.
525  void popNullFixups();
526
527  /// Clears the branch-fixups list.  This should only be called by
528  /// ResolveAllBranchFixups.
529  void clearFixups() { BranchFixups.clear(); }
530};
531
532/// CodeGenFunction - This class organizes the per-function state that is used
533/// while generating LLVM code.
534class CodeGenFunction : public CodeGenTypeCache {
535  CodeGenFunction(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
536  void operator=(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
537
538  friend class CGCXXABI;
539public:
540  /// A jump destination is an abstract label, branching to which may
541  /// require a jump out through normal cleanups.
542  struct JumpDest {
543    JumpDest() : Block(0), ScopeDepth(), Index(0) {}
544    JumpDest(llvm::BasicBlock *Block,
545             EHScopeStack::stable_iterator Depth,
546             unsigned Index)
547      : Block(Block), ScopeDepth(Depth), Index(Index) {}
548
549    bool isValid() const { return Block != 0; }
550    llvm::BasicBlock *getBlock() const { return Block; }
551    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
552    unsigned getDestIndex() const { return Index; }
553
554  private:
555    llvm::BasicBlock *Block;
556    EHScopeStack::stable_iterator ScopeDepth;
557    unsigned Index;
558  };
559
560  CodeGenModule &CGM;  // Per-module state.
561  const TargetInfo &Target;
562
563  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
564  CGBuilderTy Builder;
565
566  /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
567  /// This excludes BlockDecls.
568  const Decl *CurFuncDecl;
569  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
570  const Decl *CurCodeDecl;
571  const CGFunctionInfo *CurFnInfo;
572  QualType FnRetTy;
573  llvm::Function *CurFn;
574
575  /// CurGD - The GlobalDecl for the current function being compiled.
576  GlobalDecl CurGD;
577
578  /// PrologueCleanupDepth - The cleanup depth enclosing all the
579  /// cleanups associated with the parameters.
580  EHScopeStack::stable_iterator PrologueCleanupDepth;
581
582  /// ReturnBlock - Unified return block.
583  JumpDest ReturnBlock;
584
585  /// ReturnValue - The temporary alloca to hold the return value. This is null
586  /// iff the function has no return value.
587  llvm::Value *ReturnValue;
588
589  /// AllocaInsertPt - This is an instruction in the entry block before which
590  /// we prefer to insert allocas.
591  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
592
593  /// BoundsChecking - Emit run-time bounds checks. Higher values mean
594  /// potentially higher performance penalties.
595  unsigned char BoundsChecking;
596
597  /// \brief Whether any type-checking sanitizers are enabled. If \c false,
598  /// calls to EmitTypeCheck can be skipped.
599  bool SanitizePerformTypeCheck;
600
601  /// \brief Sanitizer options to use for this function.
602  const SanitizerOptions *SanOpts;
603
604  /// In ARC, whether we should autorelease the return value.
605  bool AutoreleaseResult;
606
607  const CodeGen::CGBlockInfo *BlockInfo;
608  llvm::Value *BlockPointer;
609
610  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
611  FieldDecl *LambdaThisCaptureField;
612
613  /// \brief A mapping from NRVO variables to the flags used to indicate
614  /// when the NRVO has been applied to this variable.
615  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
616
617  EHScopeStack EHStack;
618
619  /// i32s containing the indexes of the cleanup destinations.
620  llvm::AllocaInst *NormalCleanupDest;
621
622  unsigned NextCleanupDestIndex;
623
624  /// FirstBlockInfo - The head of a singly-linked list of block layouts.
625  CGBlockInfo *FirstBlockInfo;
626
627  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
628  llvm::BasicBlock *EHResumeBlock;
629
630  /// The exception slot.  All landing pads write the current exception pointer
631  /// into this alloca.
632  llvm::Value *ExceptionSlot;
633
634  /// The selector slot.  Under the MandatoryCleanup model, all landing pads
635  /// write the current selector value into this alloca.
636  llvm::AllocaInst *EHSelectorSlot;
637
638  /// Emits a landing pad for the current EH stack.
639  llvm::BasicBlock *EmitLandingPad();
640
641  llvm::BasicBlock *getInvokeDestImpl();
642
643  template <class T>
644  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
645    return DominatingValue<T>::save(*this, value);
646  }
647
648public:
649  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
650  /// rethrows.
651  SmallVector<llvm::Value*, 8> ObjCEHValueStack;
652
653  /// A class controlling the emission of a finally block.
654  class FinallyInfo {
655    /// Where the catchall's edge through the cleanup should go.
656    JumpDest RethrowDest;
657
658    /// A function to call to enter the catch.
659    llvm::Constant *BeginCatchFn;
660
661    /// An i1 variable indicating whether or not the @finally is
662    /// running for an exception.
663    llvm::AllocaInst *ForEHVar;
664
665    /// An i8* variable into which the exception pointer to rethrow
666    /// has been saved.
667    llvm::AllocaInst *SavedExnVar;
668
669  public:
670    void enter(CodeGenFunction &CGF, const Stmt *Finally,
671               llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
672               llvm::Constant *rethrowFn);
673    void exit(CodeGenFunction &CGF);
674  };
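  // Usage sketch (illustrative): the Objective-C @try/@finally emitters
  // bracket the protected code roughly as
  //
  //   FinallyInfo FI;
  //   FI.enter(CGF, FinallyBody, beginCatchFn, endCatchFn, rethrowFn);
  //   // ... emit the @try body and any @catch clauses ...
  //   FI.exit(CGF);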
675
676  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
677  /// current full-expression.  Safe against the possibility that
678  /// we're currently inside a conditionally-evaluated expression.
679  template <class T, class A0>
680  void pushFullExprCleanup(CleanupKind kind, A0 a0) {
681    // If we're not in a conditional branch, or if none of the
682    // arguments requires saving, then use the unconditional cleanup.
683    if (!isInConditionalBranch())
684      return EHStack.pushCleanup<T>(kind, a0);
685
686    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
687
688    typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
689    EHStack.pushCleanup<CleanupType>(kind, a0_saved);
690    initFullExprCleanup();
691  }
692
693  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
694  /// current full-expression.  Safe against the possibility that
695  /// we're currently inside a conditionally-evaluated expression.
696  template <class T, class A0, class A1>
697  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
698    // If we're not in a conditional branch, or if none of the
699    // arguments requires saving, then use the unconditional cleanup.
700    if (!isInConditionalBranch())
701      return EHStack.pushCleanup<T>(kind, a0, a1);
702
703    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
704    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
705
706    typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
707    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
708    initFullExprCleanup();
709  }
710
711  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
712  /// current full-expression.  Safe against the possibility that
713  /// we're currently inside a conditionally-evaluated expression.
714  template <class T, class A0, class A1, class A2>
715  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
716    // If we're not in a conditional branch, or if none of the
717    // arguments requires saving, then use the unconditional cleanup.
718    if (!isInConditionalBranch()) {
719      return EHStack.pushCleanup<T>(kind, a0, a1, a2);
720    }
721
722    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
723    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
724    typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
725
726    typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
727    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
728    initFullExprCleanup();
729  }
730
731  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
732  /// current full-expression.  Safe against the possibility that
733  /// we're currently inside a conditionally-evaluated expression.
734  template <class T, class A0, class A1, class A2, class A3>
735  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
736    // If we're not in a conditional branch, or if none of the
737    // arguments requires saving, then use the unconditional cleanup.
738    if (!isInConditionalBranch()) {
739      return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
740    }
741
742    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
743    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
744    typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
745    typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
746
747    typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
748    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
749                                     a2_saved, a3_saved);
750    initFullExprCleanup();
751  }
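  // Usage sketch (illustrative): pushing a destruction cleanup for a temporary
  // at the end of the current full-expression, assuming a hypothetical cleanup
  // type DestroyTemporary whose constructor takes the object address:
  //
  //   pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup, addr);
  //
  // If this is reached inside one arm of a conditional, the address is saved
  // and the cleanup is made conditional automatically.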
752
753  /// Set up the last cleanup that was pushed as a conditional
754  /// full-expression cleanup.
755  void initFullExprCleanup();
756
757  /// PushDestructorCleanup - Push a cleanup to call the
758  /// complete-object destructor of an object of the given type at the
759  /// given address.  Does nothing if T is not a C++ class type with a
760  /// non-trivial destructor.
761  void PushDestructorCleanup(QualType T, llvm::Value *Addr);
762
763  /// PushDestructorCleanup - Push a cleanup to call the
764  /// complete-object variant of the given destructor on the object at
765  /// the given address.
766  void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
767                             llvm::Value *Addr);
768
769  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
770  /// process all branch fixups.
771  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
772
773  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
774  /// The block cannot be reactivated.  Pops it if it's the top of the
775  /// stack.
776  ///
777  /// \param DominatingIP - An instruction which is known to
778  ///   dominate the current IP (if set) and which lies along
779  ///   all paths of execution between the current IP and
780  ///   the point at which the cleanup comes into scope.
781  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
782                              llvm::Instruction *DominatingIP);
783
784  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
785  /// Cannot be used to resurrect a deactivated cleanup.
786  ///
787  /// \param DominatingIP - An instruction which is known to
788  ///   dominate the current IP (if set) and which lies along
789  ///   all paths of execution between the current IP and
790  ///   the point at which the cleanup comes into scope.
791  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
792                            llvm::Instruction *DominatingIP);
793
794  /// \brief Enters a new scope for capturing cleanups, all of which
795  /// will be executed once the scope is exited.
796  class RunCleanupsScope {
797    EHScopeStack::stable_iterator CleanupStackDepth;
798    bool OldDidCallStackSave;
799    bool PerformCleanup;
800
801    RunCleanupsScope(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
802    void operator=(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
803
804  protected:
805    CodeGenFunction& CGF;
806
807  public:
808    /// \brief Enter a new cleanup scope.
809    explicit RunCleanupsScope(CodeGenFunction &CGF)
810      : PerformCleanup(true), CGF(CGF)
811    {
812      CleanupStackDepth = CGF.EHStack.stable_begin();
813      OldDidCallStackSave = CGF.DidCallStackSave;
814      CGF.DidCallStackSave = false;
815    }
816
817    /// \brief Exit this cleanup scope, emitting any accumulated
818    /// cleanups.
819    ~RunCleanupsScope() {
820      if (PerformCleanup) {
821        CGF.DidCallStackSave = OldDidCallStackSave;
822        CGF.PopCleanupBlocks(CleanupStackDepth);
823      }
824    }
825
826    /// \brief Determine whether this scope requires any cleanups.
827    bool requiresCleanups() const {
828      return CGF.EHStack.stable_begin() != CleanupStackDepth;
829    }
830
831    /// \brief Force the emission of cleanups now, instead of waiting
832    /// until this object is destroyed.
833    void ForceCleanup() {
834      assert(PerformCleanup && "Already forced cleanup");
835      CGF.DidCallStackSave = OldDidCallStackSave;
836      CGF.PopCleanupBlocks(CleanupStackDepth);
837      PerformCleanup = false;
838    }
839  };
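  // Usage sketch (illustrative): statement emission typically wraps a body in
  // a scope so that cleanups pushed while emitting it are run on exit:
  //
  //   {
  //     RunCleanupsScope Scope(*this);
  //     EmitStmt(Body);          // may push cleanups onto EHStack
  //   }                          // accumulated cleanups are emitted here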
840
841  class LexicalScope: protected RunCleanupsScope {
842    SourceRange Range;
843    bool PopDebugStack;
844
845    LexicalScope(const LexicalScope &) LLVM_DELETED_FUNCTION;
846    void operator=(const LexicalScope &) LLVM_DELETED_FUNCTION;
847
848  public:
849    /// \brief Enter a new cleanup scope.
850    explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
851      : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
852      if (CGDebugInfo *DI = CGF.getDebugInfo())
853        DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
854    }
855
856    /// \brief Exit this cleanup scope, emitting any accumulated
857    /// cleanups.
858    ~LexicalScope() {
859      if (PopDebugStack) {
860        CGDebugInfo *DI = CGF.getDebugInfo();
861        if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
862      }
863    }
864
865    /// \brief Force the emission of cleanups now, instead of waiting
866    /// until this object is destroyed.
867    void ForceCleanup() {
868      RunCleanupsScope::ForceCleanup();
869      if (CGDebugInfo *DI = CGF.getDebugInfo()) {
870        DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
871        PopDebugStack = false;
872      }
873    }
874  };
875
876
877  /// PopCleanupBlocks - Takes the old cleanup stack size and emits
878  /// the cleanup blocks that have been added.
879  void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
880
881  void ResolveBranchFixups(llvm::BasicBlock *Target);
882
883  /// The given basic block lies in the current EH scope, but may be a
884  /// target of a potentially scope-crossing jump; get a stable handle
885  /// to which we can perform this jump later.
886  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
887    return JumpDest(Target,
888                    EHStack.getInnermostNormalCleanup(),
889                    NextCleanupDestIndex++);
890  }
891
892  /// The given basic block lies in the current EH scope, but may be a
893  /// target of a potentially scope-crossing jump; get a stable handle
894  /// to which we can perform this jump later.
895  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
896    return getJumpDestInCurrentScope(createBasicBlock(Name));
897  }
898
899  /// EmitBranchThroughCleanup - Emit a branch from the current insert
900  /// block through the normal cleanup handling code (if any) and then
901  /// on to \arg Dest.
902  void EmitBranchThroughCleanup(JumpDest Dest);
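  // Usage sketch (illustrative): loop emission creates its destinations up
  // front and then branches to them through any intervening cleanups:
  //
  //   JumpDest ContinueDest = getJumpDestInCurrentScope("for.inc");
  //   // ... emit the loop body, possibly entering cleanup scopes ...
  //   EmitBranchThroughCleanup(ContinueDest);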
903
904  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
905  /// specified destination obviously has no cleanups to run.  'false' is always
906  /// a conservatively correct answer for this method.
907  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
908
909  /// popCatchScope - Pops the catch scope at the top of the EHScope
910  /// stack, emitting any required code (other than the catch handlers
911  /// themselves).
912  void popCatchScope();
913
914  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
915  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
916
917  /// An object to manage conditionally-evaluated expressions.
918  class ConditionalEvaluation {
919    llvm::BasicBlock *StartBB;
920
921  public:
922    ConditionalEvaluation(CodeGenFunction &CGF)
923      : StartBB(CGF.Builder.GetInsertBlock()) {}
924
925    void begin(CodeGenFunction &CGF) {
926      assert(CGF.OutermostConditional != this);
927      if (!CGF.OutermostConditional)
928        CGF.OutermostConditional = this;
929    }
930
931    void end(CodeGenFunction &CGF) {
932      assert(CGF.OutermostConditional != 0);
933      if (CGF.OutermostConditional == this)
934        CGF.OutermostConditional = 0;
935    }
936
937    /// Returns a block which will be executed prior to each
938    /// evaluation of the conditional code.
939    llvm::BasicBlock *getStartingBlock() const {
940      return StartBB;
941    }
942  };
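  // Usage sketch (illustrative): when only one arm of a conditional is
  // emitted, the evaluation is bracketed so that cleanups pushed inside it
  // are registered as conditional:
  //
  //   ConditionalEvaluation eval(*this);
  //   eval.begin(*this);
  //   // ... emit the conditionally-executed expression ...
  //   eval.end(*this);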
943
944  /// isInConditionalBranch - Return true if we're currently emitting
945  /// one branch or the other of a conditional expression.
946  bool isInConditionalBranch() const { return OutermostConditional != 0; }
947
948  void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
949    assert(isInConditionalBranch());
950    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
951    new llvm::StoreInst(value, addr, &block->back());
952  }
953
954  /// An RAII object to record that we're evaluating a statement
955  /// expression.
956  class StmtExprEvaluation {
957    CodeGenFunction &CGF;
958
959    /// We have to save the outermost conditional: cleanups in a
960    /// statement expression aren't conditional just because the
961    /// StmtExpr is.
962    ConditionalEvaluation *SavedOutermostConditional;
963
964  public:
965    StmtExprEvaluation(CodeGenFunction &CGF)
966      : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
967      CGF.OutermostConditional = 0;
968    }
969
970    ~StmtExprEvaluation() {
971      CGF.OutermostConditional = SavedOutermostConditional;
972      CGF.EnsureInsertPoint();
973    }
974  };
975
976  /// An object which temporarily prevents a value from being
977  /// destroyed by aggressive peephole optimizations that assume that
978  /// all uses of a value have been realized in the IR.
979  class PeepholeProtection {
980    llvm::Instruction *Inst;
981    friend class CodeGenFunction;
982
983  public:
984    PeepholeProtection() : Inst(0) {}
985  };
986
987  /// A non-RAII class containing all the information about a bound
988  /// opaque value.  OpaqueValueMapping, below, is a RAII wrapper for
989  /// this which makes individual mappings very simple; using this
990  /// class directly is useful when you have a variable number of
991  /// opaque values or don't want the RAII functionality for some
992  /// reason.
993  class OpaqueValueMappingData {
994    const OpaqueValueExpr *OpaqueValue;
995    bool BoundLValue;
996    CodeGenFunction::PeepholeProtection Protection;
997
998    OpaqueValueMappingData(const OpaqueValueExpr *ov,
999                           bool boundLValue)
1000      : OpaqueValue(ov), BoundLValue(boundLValue) {}
1001  public:
1002    OpaqueValueMappingData() : OpaqueValue(0) {}
1003
1004    static bool shouldBindAsLValue(const Expr *expr) {
1005      // gl-values should be bound as l-values for obvious reasons.
1006      // Records should be bound as l-values because IR generation
1007      // always keeps them in memory.  Expressions of function type
1008      // act exactly like l-values but are formally required to be
1009      // r-values in C.
1010      return expr->isGLValue() ||
1011             expr->getType()->isRecordType() ||
1012             expr->getType()->isFunctionType();
1013    }
1014
1015    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1016                                       const OpaqueValueExpr *ov,
1017                                       const Expr *e) {
1018      if (shouldBindAsLValue(ov))
1019        return bind(CGF, ov, CGF.EmitLValue(e));
1020      return bind(CGF, ov, CGF.EmitAnyExpr(e));
1021    }
1022
1023    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1024                                       const OpaqueValueExpr *ov,
1025                                       const LValue &lv) {
1026      assert(shouldBindAsLValue(ov));
1027      CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1028      return OpaqueValueMappingData(ov, true);
1029    }
1030
1031    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1032                                       const OpaqueValueExpr *ov,
1033                                       const RValue &rv) {
1034      assert(!shouldBindAsLValue(ov));
1035      CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1036
1037      OpaqueValueMappingData data(ov, false);
1038
1039      // Work around an extremely aggressive peephole optimization in
1040      // EmitScalarConversion which assumes that all other uses of a
1041      // value are extant.
1042      data.Protection = CGF.protectFromPeepholes(rv);
1043
1044      return data;
1045    }
1046
1047    bool isValid() const { return OpaqueValue != 0; }
1048    void clear() { OpaqueValue = 0; }
1049
1050    void unbind(CodeGenFunction &CGF) {
1051      assert(OpaqueValue && "no data to unbind!");
1052
1053      if (BoundLValue) {
1054        CGF.OpaqueLValues.erase(OpaqueValue);
1055      } else {
1056        CGF.OpaqueRValues.erase(OpaqueValue);
1057        CGF.unprotectFromPeepholes(Protection);
1058      }
1059    }
1060  };
1061
1062  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1063  class OpaqueValueMapping {
1064    CodeGenFunction &CGF;
1065    OpaqueValueMappingData Data;
1066
1067  public:
1068    static bool shouldBindAsLValue(const Expr *expr) {
1069      return OpaqueValueMappingData::shouldBindAsLValue(expr);
1070    }
1071
1072    /// Build the opaque value mapping for the given conditional
1073    /// operator if it's the GNU ?: extension.  This is a common
1074    /// enough pattern that the convenience constructor is really
1075    /// helpful.
1076    ///
1077    OpaqueValueMapping(CodeGenFunction &CGF,
1078                       const AbstractConditionalOperator *op) : CGF(CGF) {
1079      if (isa<ConditionalOperator>(op))
1080        // Leave Data empty.
1081        return;
1082
1083      const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1084      Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1085                                          e->getCommon());
1086    }
1087
1088    OpaqueValueMapping(CodeGenFunction &CGF,
1089                       const OpaqueValueExpr *opaqueValue,
1090                       LValue lvalue)
1091      : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1092    }
1093
1094    OpaqueValueMapping(CodeGenFunction &CGF,
1095                       const OpaqueValueExpr *opaqueValue,
1096                       RValue rvalue)
1097      : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1098    }
1099
1100    void pop() {
1101      Data.unbind(CGF);
1102      Data.clear();
1103    }
1104
1105    ~OpaqueValueMapping() {
1106      if (Data.isValid()) Data.unbind(CGF);
1107    }
1108  };
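  // Usage sketch (illustrative): emitting a GNU "?:" binds the shared operand
  // once, then emits both branches against that mapping:
  //
  //   OpaqueValueMapping binding(*this, E);   // E: the conditional operator
  //   // ... emit E->getTrueExpr() and E->getFalseExpr(); any uses of the
  //   // OpaqueValueExpr inside them resolve through the recorded mapping.
  //   // The mapping is removed when 'binding' goes out of scope.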
1109
1110  /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
1111  /// number that holds the value.
1112  unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
1113
1114  /// BuildBlockByrefAddress - Computes the address of the
1115  /// variable which is declared as __block.
1116  llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
1117                                      const VarDecl *V);
1118private:
1119  CGDebugInfo *DebugInfo;
1120  bool DisableDebugInfo;
1121
1122  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1123  /// calling llvm.stacksave for multiple VLAs in the same scope.
1124  bool DidCallStackSave;
1125
1126  /// IndirectBranch - The first time an indirect goto is seen we create a block
1127  /// with an indirect branch.  Every time we see the address of a label taken,
1128  /// we add the label to the indirect goto.  Every subsequent indirect goto is
1129  /// codegen'd as a jump to the IndirectBranch's basic block.
1130  llvm::IndirectBrInst *IndirectBranch;
1131
1132  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1133  /// decls.
1134  typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
1135  DeclMapTy LocalDeclMap;
1136
1137  /// LabelMap - This keeps track of the LLVM basic block for each C label.
1138  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1139
1140  // BreakContinueStack - This keeps track of where break and continue
1141  // statements should jump to.
1142  struct BreakContinue {
1143    BreakContinue(JumpDest Break, JumpDest Continue)
1144      : BreakBlock(Break), ContinueBlock(Continue) {}
1145
1146    JumpDest BreakBlock;
1147    JumpDest ContinueBlock;
1148  };
1149  SmallVector<BreakContinue, 8> BreakContinueStack;
1150
1151  /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
1152  /// if the current context is not within a switch.
1153  llvm::SwitchInst *SwitchInsn;
1154
1155  /// CaseRangeBlock - This block holds the range-check condition for the last
1156  /// case statement range in the current switch instruction.
1157  llvm::BasicBlock *CaseRangeBlock;
1158
1159  /// OpaqueLValues - Keeps track of the current set of opaque value
1160  /// expressions.
1161  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1162  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1163
1164  // VLASizeMap - This keeps track of the associated size for each VLA type.
1165  // We track this by the size expression rather than the type itself because
1166  // in certain situations, like a const qualifier applied to a VLA typedef,
1167  // multiple VLA types can share the same size expression.
1168  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1169  // enter/leave scopes.
1170  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1171
1172  /// A block containing a single 'unreachable' instruction.  Created
1173  /// lazily by getUnreachableBlock().
1174  llvm::BasicBlock *UnreachableBlock;
1175
1176  /// CXXABIThisDecl - When generating code for a C++ member function,
1177  /// this will hold the implicit 'this' declaration.
1178  ImplicitParamDecl *CXXABIThisDecl;
1179  llvm::Value *CXXABIThisValue;
1180  llvm::Value *CXXThisValue;
1181
1182  /// CXXVTTDecl - When generating code for a base object constructor or
1183  /// base object destructor with virtual bases, this will hold the implicit
1184  /// VTT parameter.
1185  ImplicitParamDecl *CXXVTTDecl;
1186  llvm::Value *CXXVTTValue;
1187
1188  /// OutermostConditional - Points to the outermost active
1189  /// conditional control.  This is used so that we know if a
1190  /// temporary should be destroyed conditionally.
1191  ConditionalEvaluation *OutermostConditional;
1192
1193
1194  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
1195  /// type and the field number that contains the actual data.
1196  llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
1197                                              unsigned> > ByRefValueInfo;
1198
1199  llvm::BasicBlock *TerminateLandingPad;
1200  llvm::BasicBlock *TerminateHandler;
1201  llvm::BasicBlock *TrapBB;
1202
1203  /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
1204  /// In the kernel metadata node, reference the kernel function and metadata
1205  /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
1206  /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
1207  ///   "work_group_size_hint", and three 32-bit integers X, Y and Z.
1208  /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
1209  ///   "reqd_work_group_size", and three 32-bit integers X, Y and Z.
1210  void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
1211                                llvm::Function *Fn);
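  // For illustration, a kernel declared roughly as
  //
  //   kernel __attribute__((reqd_work_group_size(8, 4, 1))) void f(...);
  //
  // gets an entry in the named metadata node opencl.kernels referencing the
  // function together with a node carrying the string "reqd_work_group_size"
  // and the three 32-bit integers 8, 4 and 1.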
1212
1213public:
1214  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
1215  ~CodeGenFunction();
1216
1217  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1218  ASTContext &getContext() const { return CGM.getContext(); }
1219  /// Returns true if DebugInfo is actually initialized.
1220  bool maybeInitializeDebugInfo() {
1221    if (CGM.getModuleDebugInfo()) {
1222      DebugInfo = CGM.getModuleDebugInfo();
1223      return true;
1224    }
1225    return false;
1226  }
1227  CGDebugInfo *getDebugInfo() {
1228    if (DisableDebugInfo)
1229      return NULL;
1230    return DebugInfo;
1231  }
1232  void disableDebugInfo() { DisableDebugInfo = true; }
1233  void enableDebugInfo() { DisableDebugInfo = false; }
1234
1235  bool shouldUseFusedARCCalls() {
1236    return CGM.getCodeGenOpts().OptimizationLevel == 0;
1237  }
1238
1239  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1240
1241  /// Returns a pointer to the function's exception object and selector slots,
1242  /// which are assigned in every landing pad.
1243  llvm::Value *getExceptionSlot();
1244  llvm::Value *getEHSelectorSlot();
1245
1246  /// Returns the contents of the function's exception object and selector
1247  /// slots.
1248  llvm::Value *getExceptionFromSlot();
1249  llvm::Value *getSelectorFromSlot();
1250
1251  llvm::Value *getNormalCleanupDestSlot();
1252
1253  llvm::BasicBlock *getUnreachableBlock() {
1254    if (!UnreachableBlock) {
1255      UnreachableBlock = createBasicBlock("unreachable");
1256      new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1257    }
1258    return UnreachableBlock;
1259  }
1260
1261  llvm::BasicBlock *getInvokeDest() {
1262    if (!EHStack.requiresLandingPad()) return 0;
1263    return getInvokeDestImpl();
1264  }
1265
1266  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1267
1268  //===--------------------------------------------------------------------===//
1269  //                                  Cleanups
1270  //===--------------------------------------------------------------------===//
1271
1272  typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
1273
1274  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1275                                        llvm::Value *arrayEndPointer,
1276                                        QualType elementType,
1277                                        Destroyer *destroyer);
1278  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1279                                      llvm::Value *arrayEnd,
1280                                      QualType elementType,
1281                                      Destroyer *destroyer);
1282
1283  void pushDestroy(QualType::DestructionKind dtorKind,
1284                   llvm::Value *addr, QualType type);
1285  void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
1286                   Destroyer *destroyer, bool useEHCleanupForArray);
1287  void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
1288                   bool useEHCleanupForArray);
1289  llvm::Function *generateDestroyHelper(llvm::Constant *addr,
1290                                        QualType type,
1291                                        Destroyer *destroyer,
1292                                        bool useEHCleanupForArray);
1293  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1294                        QualType type, Destroyer *destroyer,
1295                        bool checkZeroLength, bool useEHCleanup);
1296
1297  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1298
1299  /// Determines whether an EH cleanup is required to destroy a type
1300  /// with the given destruction kind.
1301  bool needsEHCleanup(QualType::DestructionKind kind) {
1302    switch (kind) {
1303    case QualType::DK_none:
1304      return false;
1305    case QualType::DK_cxx_destructor:
1306    case QualType::DK_objc_weak_lifetime:
1307      return getLangOpts().Exceptions;
1308    case QualType::DK_objc_strong_lifetime:
1309      return getLangOpts().Exceptions &&
1310             CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1311    }
1312    llvm_unreachable("bad destruction kind");
1313  }
1314
1315  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1316    return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1317  }
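  // Usage sketch (illustrative): registering a destroy cleanup for a local
  // object whose address is in 'addr':
  //
  //   if (QualType::DestructionKind dtorKind = type.isDestructedType())
  //     pushDestroy(getCleanupKind(dtorKind), addr, type,
  //                 getDestroyer(dtorKind), needsEHCleanup(dtorKind));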
1318
1319  //===--------------------------------------------------------------------===//
1320  //                                  Objective-C
1321  //===--------------------------------------------------------------------===//
1322
1323  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1324
1325  void StartObjCMethod(const ObjCMethodDecl *MD,
1326                       const ObjCContainerDecl *CD,
1327                       SourceLocation StartLoc);
1328
1329  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1330  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1331                          const ObjCPropertyImplDecl *PID);
1332  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1333                              const ObjCPropertyImplDecl *propImpl,
1334                              const ObjCMethodDecl *GetterMethodDecl,
1335                              llvm::Constant *AtomicHelperFn);
1336
1337  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1338                                  ObjCMethodDecl *MD, bool ctor);
1339
1340  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1341  /// for the given property.
1342  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1343                          const ObjCPropertyImplDecl *PID);
1344  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1345                              const ObjCPropertyImplDecl *propImpl,
1346                              llvm::Constant *AtomicHelperFn);
1347  bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1348  bool IvarTypeWithAggrGCObjects(QualType Ty);
1349
1350  //===--------------------------------------------------------------------===//
1351  //                                  Block Bits
1352  //===--------------------------------------------------------------------===//
1353
1354  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1355  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
1356  static void destroyBlockInfos(CGBlockInfo *info);
1357  llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1358                                           const CGBlockInfo &Info,
1359                                           llvm::StructType *,
1360                                           llvm::Constant *BlockVarLayout);
1361
1362  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1363                                        const CGBlockInfo &Info,
1364                                        const Decl *OuterFuncDecl,
1365                                        const DeclMapTy &ldm,
1366                                        bool IsLambdaConversionToBlock);
1367
1368  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1369  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1370  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1371                                             const ObjCPropertyImplDecl *PID);
1372  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1373                                             const ObjCPropertyImplDecl *PID);
1374  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1375
1376  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1377
1378  class AutoVarEmission;
1379
1380  void emitByrefStructureInit(const AutoVarEmission &emission);
1381  void enterByrefCleanup(const AutoVarEmission &emission);
1382
1383  llvm::Value *LoadBlockStruct() {
1384    assert(BlockPointer && "no block pointer set!");
1385    return BlockPointer;
1386  }
1387
1388  void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1389  void AllocateBlockDecl(const DeclRefExpr *E);
1390  llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1391  llvm::Type *BuildByRefType(const VarDecl *var);
1392
1393  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1394                    const CGFunctionInfo &FnInfo);
1395  void StartFunction(GlobalDecl GD, QualType RetTy,
1396                     llvm::Function *Fn,
1397                     const CGFunctionInfo &FnInfo,
1398                     const FunctionArgList &Args,
1399                     SourceLocation StartLoc);
1400
1401  void EmitConstructorBody(FunctionArgList &Args);
1402  void EmitDestructorBody(FunctionArgList &Args);
1403  void EmitFunctionBody(FunctionArgList &Args);
1404
1405  void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
1406                                  CallArgList &CallArgs);
1407  void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
1408  void EmitLambdaBlockInvokeBody();
1409  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1410  void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
1411
1412  /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1413  /// emission when possible.
1414  void EmitReturnBlock();
1415
1416  /// FinishFunction - Complete IR generation of the current function. It is
1417  /// legal to call this function even if there is no current insertion point.
1418  void FinishFunction(SourceLocation EndLoc=SourceLocation());
1419
1420  /// GenerateThunk - Generate a thunk for the given method.
1421  void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1422                     GlobalDecl GD, const ThunkInfo &Thunk);
1423
1424  void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1425                            GlobalDecl GD, const ThunkInfo &Thunk);
1426
1427  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1428                        FunctionArgList &Args);
1429
1430  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
1431                               ArrayRef<VarDecl *> ArrayIndexes);
1432
1433  /// InitializeVTablePointer - Initialize the vtable pointer of the given
1434  /// subobject.
1435  ///
1436  void InitializeVTablePointer(BaseSubobject Base,
1437                               const CXXRecordDecl *NearestVBase,
1438                               CharUnits OffsetFromNearestVBase,
1439                               llvm::Constant *VTable,
1440                               const CXXRecordDecl *VTableClass);
1441
1442  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1443  void InitializeVTablePointers(BaseSubobject Base,
1444                                const CXXRecordDecl *NearestVBase,
1445                                CharUnits OffsetFromNearestVBase,
1446                                bool BaseIsNonVirtualPrimaryBase,
1447                                llvm::Constant *VTable,
1448                                const CXXRecordDecl *VTableClass,
1449                                VisitedVirtualBasesSetTy& VBases);
1450
1451  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1452
1453  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1454  /// to by This.
1455  llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
1456
1457  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1458  /// given phase of destruction for a destructor.  The end result
1459  /// should call destructors on members and base classes in reverse
1460  /// order of their construction.
1461  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1462
1463  /// ShouldInstrumentFunction - Return true if the current function should be
1464  /// instrumented with __cyg_profile_func_* calls
1465  bool ShouldInstrumentFunction();
1466
1467  /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1468  /// instrumentation function with the current function and the call site, if
1469  /// function instrumentation is enabled.
1470  void EmitFunctionInstrumentation(const char *Fn);
1471
1472  /// EmitMCountInstrumentation - Emit call to .mcount.
1473  void EmitMCountInstrumentation();
1474
1475  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1476  /// arguments for the given function. This is also responsible for naming the
1477  /// LLVM function arguments.
1478  void EmitFunctionProlog(const CGFunctionInfo &FI,
1479                          llvm::Function *Fn,
1480                          const FunctionArgList &Args);
1481
1482  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1483  /// given temporary.
1484  void EmitFunctionEpilog(const CGFunctionInfo &FI);
1485
1486  /// EmitStartEHSpec - Emit the start of the exception spec.
1487  void EmitStartEHSpec(const Decl *D);
1488
1489  /// EmitEndEHSpec - Emit the end of the exception spec.
1490  void EmitEndEHSpec(const Decl *D);
1491
1492  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1493  llvm::BasicBlock *getTerminateLandingPad();
1494
1495  /// getTerminateHandler - Return a handler (not a landing pad, just
1496  /// a catch handler) that just calls terminate.  This is used when
1497  /// a terminate scope encloses a try.
1498  llvm::BasicBlock *getTerminateHandler();
1499
1500  llvm::Type *ConvertTypeForMem(QualType T);
1501  llvm::Type *ConvertType(QualType T);
1502  llvm::Type *ConvertType(const TypeDecl *T) {
1503    return ConvertType(getContext().getTypeDeclType(T));
1504  }
1505
1506  /// LoadObjCSelf - Load the value of self. This function is only valid while
1507  /// generating code for an Objective-C method.
1508  llvm::Value *LoadObjCSelf();
1509
1510  /// TypeOfSelfObject - Return type of object that this self represents.
1511  QualType TypeOfSelfObject();
1512
1513  /// hasAggregateLLVMType - Return true if the specified AST type will map into
1514  /// an aggregate LLVM type or is void.
1515  static bool hasAggregateLLVMType(QualType T);
1516
1517  /// createBasicBlock - Create an LLVM basic block.
1518  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
1519                                     llvm::Function *parent = 0,
1520                                     llvm::BasicBlock *before = 0) {
1521#ifdef NDEBUG
1522    return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1523#else
1524    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1525#endif
1526  }
1527
1528  /// getJumpDestForLabel - Return the JumpDest that the specified label
1529  /// maps to.
1530  JumpDest getJumpDestForLabel(const LabelDecl *S);
1531
1532  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1533  /// another basic block, simplify it. This assumes that no other code could
1534  /// potentially reference the basic block.
1535  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1536
1537  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1538  /// adding a fall-through branch from the current insert block if
1539  /// necessary. It is legal to call this function even if there is no current
1540  /// insertion point.
1541  ///
1542  /// IsFinished - If true, indicates that the caller has finished emitting
1543  /// branches to the given block and does not expect to emit code into it. This
1544  /// means the block can be ignored if it is unreachable.
1545  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1546
1547  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
1548  /// near its uses, and leave the insertion point in it.
1549  void EmitBlockAfterUses(llvm::BasicBlock *BB);
1550
1551  /// EmitBranch - Emit a branch to the specified basic block from the current
1552  /// insert block, taking care to avoid creation of branches from dummy
1553  /// blocks. It is legal to call this function even if there is no current
1554  /// insertion point.
1555  ///
1556  /// This function clears the current insertion point. The caller should follow
1557  /// calls to this function with calls to Emit*Block prior to generating new
1558  /// code.
1559  void EmitBranch(llvm::BasicBlock *Block);
1560
1561  /// HaveInsertPoint - True if an insertion point is defined. If not, this
1562  /// indicates that the current code being emitted is unreachable.
1563  bool HaveInsertPoint() const {
1564    return Builder.GetInsertBlock() != 0;
1565  }
1566
1567  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1568  /// emitted IR has a place to go. Note that by definition, if this function
1569  /// creates a block then that block is unreachable; callers may do better to
1570  /// detect when no insertion point is defined and simply skip IR generation.
1571  void EnsureInsertPoint() {
1572    if (!HaveInsertPoint())
1573      EmitBlock(createBasicBlock());
1574  }
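
  // Illustrative sketch (not from a specific caller; CondV, ThenBB and ContBB
  // are hypothetical names): a statement emitter typically strings these
  // helpers together roughly as follows, where CondV is an i1 value such as
  // the result of EvaluateExprAsBool:
  //
  //   llvm::BasicBlock *ThenBB = createBasicBlock("then");
  //   llvm::BasicBlock *ContBB = createBasicBlock("cont");
  //   Builder.CreateCondBr(CondV, ThenBB, ContBB);
  //   EmitBlock(ThenBB);
  //   // ... emit the 'then' body; this may clear the insertion point ...
  //   EmitBranch(ContBB);
  //   EmitBlock(ContBB, /*IsFinished=*/true);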
1575
1576  /// ErrorUnsupported - Print out an error that codegen doesn't support the
1577  /// specified stmt yet.
1578  void ErrorUnsupported(const Stmt *S, const char *Type,
1579                        bool OmitOnError=false);
1580
1581  //===--------------------------------------------------------------------===//
1582  //                                  Helpers
1583  //===--------------------------------------------------------------------===//
1584
1585  LValue MakeAddrLValue(llvm::Value *V, QualType T,
1586                        CharUnits Alignment = CharUnits()) {
1587    return LValue::MakeAddr(V, T, Alignment, getContext(),
1588                            CGM.getTBAAInfo(T));
1589  }
1590
1591  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
1592    CharUnits Alignment;
1593    if (!T->isIncompleteType())
1594      Alignment = getContext().getTypeAlignInChars(T);
1595    return LValue::MakeAddr(V, T, Alignment, getContext(),
1596                            CGM.getTBAAInfo(T));
1597  }
1598
1599  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
1600  /// block. The caller is responsible for setting an appropriate alignment on
1601  /// the alloca.
1602  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
1603                                     const Twine &Name = "tmp");
1604
1605  /// InitTempAlloca - Provide an initial value for the given alloca.
1606  void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1607
1608  /// CreateIRTemp - Create a temporary IR object of the given type, with
1609  /// appropriate alignment. This routine should only be used when a temporary
1610  /// value needs to be stored into an alloca (for example, to avoid explicit
1611  /// PHI construction), but the type is the IR type, not the type appropriate
1612  /// for storing in memory.
1613  llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
1614
1615  /// CreateMemTemp - Create a temporary memory object of the given type, with
1616  /// appropriate alignment.
1617  llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
1618
1619  /// CreateAggTemp - Create a temporary memory object for the given
1620  /// aggregate type.
1621  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
1622    CharUnits Alignment = getContext().getTypeAlignInChars(T);
1623    return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
1624                                 T.getQualifiers(),
1625                                 AggValueSlot::IsNotDestructed,
1626                                 AggValueSlot::DoesNotNeedGCBarriers,
1627                                 AggValueSlot::IsNotAliased);
1628  }
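
  // Illustrative sketch (Ty, V and the variable names are hypothetical): the
  // temporary helpers above are commonly combined with MakeAddrLValue, e.g. to
  // spill an already-computed scalar of type Ty into memory:
  //
  //   llvm::AllocaInst *Tmp = CreateMemTemp(Ty, "spill");
  //   LValue TmpLV = MakeAddrLValue(Tmp, Ty);
  //   EmitStoreOfScalar(V, TmpLV, /*isInit=*/true);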
1629
1630  /// Emit a cast to void* in the appropriate address space.
1631  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1632
1633  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1634  /// expression and compare the result against zero, returning an Int1Ty value.
1635  llvm::Value *EvaluateExprAsBool(const Expr *E);
1636
1637  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1638  void EmitIgnoredExpr(const Expr *E);
1639
1640  /// EmitAnyExpr - Emit code to compute the specified expression which can have
1641  /// any type.  The result is returned as an RValue struct.  If this is an
1642  /// aggregate expression, the aggSlot argument indicates where the result
1643  /// should be returned.
1644  ///
1645  /// \param ignoreResult True if the resulting value isn't used.
1646  RValue EmitAnyExpr(const Expr *E,
1647                     AggValueSlot aggSlot = AggValueSlot::ignored(),
1648                     bool ignoreResult = false);
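
  // Illustrative sketch (E and AggTy are hypothetical): when the caller needs
  // an aggregate result in a known location, it might pass a real slot:
  //
  //   AggValueSlot Slot = CreateAggTemp(AggTy, "agg.tmp");
  //   RValue RV = EmitAnyExpr(E, Slot);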
1649
1650  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1651  // or the value of the expression, depending on how va_list is defined.
1652  llvm::Value *EmitVAListRef(const Expr *E);
1653
1654  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
1655  /// always be accessible even if no aggregate location is provided.
1656  RValue EmitAnyExprToTemp(const Expr *E);
1657
1658  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
1659  /// arbitrary expression into the given memory location.
1660  void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1661                        Qualifiers Quals, bool IsInitializer);
1662
1663  /// EmitExprAsInit - Emits the code necessary to initialize a
1664  /// location in memory with the given initializer.
1665  void EmitExprAsInit(const Expr *init, const ValueDecl *D,
1666                      LValue lvalue, bool capturedByInit);
1667
1668  /// hasVolatileMember - returns true if the aggregate type has a volatile
1669  /// member.
1670  bool hasVolatileMember(QualType T) {
1671    if (const RecordType *RT = T->getAs<RecordType>()) {
1672      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
1673      return RD->hasVolatileMember();
1674    }
1675    return false;
1676  }
1677  /// EmitAggregateAssign - Emit an aggregate assignment.
1678  ///
1679  /// The difference from EmitAggregateCopy is that tail padding is not copied.
1680  /// This is required for correctness when assigning non-POD structures in C++.
1681  void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1682                           QualType EltTy) {
1683    bool IsVolatile = hasVolatileMember(EltTy);
1684    EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, CharUnits::Zero(),
1685                      true);
1686  }
1687
1688  /// EmitAggregateCopy - Emit an aggregate copy.
1689  ///
1690  /// \param isVolatile - True iff either the source or the destination is
1691  /// volatile.
1692  /// \param isAssignment - If false, allow padding to be copied.  This often
1693  /// yields more efficient code.
1694  void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1695                         QualType EltTy, bool isVolatile=false,
1696                         CharUnits Alignment = CharUnits::Zero(),
1697                         bool isAssignment = false);
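
  // Illustrative sketch (DestPtr, SrcPtr and RecTy are hypothetical): for a
  // C++ non-POD record type, an assignment must preserve the destination's
  // tail padding, while an initialization may copy it:
  //
  //   EmitAggregateAssign(DestPtr, SrcPtr, RecTy);  // assignment, skips padding
  //   EmitAggregateCopy(DestPtr, SrcPtr, RecTy);    // initialization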
1698
1699  /// StartBlock - Start a new block named N. If the insert block is a dummy
1700  /// block, reuse it.
1701  void StartBlock(const char *N);
1702
1703  /// GetAddrOfLocalVar - Return the address of a local variable.
1704  llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1705    llvm::Value *Res = LocalDeclMap[VD];
1706    assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1707    return Res;
1708  }
1709
1710  /// getOpaqueLValueMapping - Given an opaque value expression (which
1711  /// must be mapped to an l-value), return its mapping.
1712  const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1713    assert(OpaqueValueMapping::shouldBindAsLValue(e));
1714
1715    llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1716      it = OpaqueLValues.find(e);
1717    assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1718    return it->second;
1719  }
1720
1721  /// getOpaqueRValueMapping - Given an opaque value expression (which
1722  /// must be mapped to an r-value), return its mapping.
1723  const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1724    assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1725
1726    llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1727      it = OpaqueRValues.find(e);
1728    assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1729    return it->second;
1730  }
1731
1732  /// getAccessedFieldNo - Given an encoded value and a result number, return
1733  /// the input field number being accessed.
1734  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1735
1736  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1737  llvm::BasicBlock *GetIndirectGotoBlock();
1738
1739  /// EmitNullInitialization - Generate code to set a value of the given type to
1740  /// null. If the type contains data member pointers, they will be initialized
1741  /// to -1 in accordance with the Itanium C++ ABI.
1742  void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1743
1744  // EmitVAArg - Generate code to get an argument from the passed in pointer
1745  // and update it accordingly. The return value is a pointer to the argument.
1746  // FIXME: We should be able to get rid of this method and use the va_arg
1747  // instruction in LLVM instead once it works well enough.
1748  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1749
1750  /// emitArrayLength - Compute the length of an array, even if it's a
1751  /// VLA, and drill down to the base element type.
1752  llvm::Value *emitArrayLength(const ArrayType *arrayType,
1753                               QualType &baseType,
1754                               llvm::Value *&addr);
1755
1756  /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions
1757  /// in the given variably-modified type and store them in the VLASizeMap.
1758  ///
1759  /// This function can be called with a null (unreachable) insert point.
1760  void EmitVariablyModifiedType(QualType Ty);
1761
1762  /// getVLASize - Returns an LLVM value that corresponds to the size,
1763  /// in non-variably-sized elements, of a variable length array type,
1764  /// plus the largest non-variably-sized element type.  Assumes that
1765  /// the type has already been emitted with EmitVariablyModifiedType.
1766  std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
1767  std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
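
  // Illustrative sketch (VlaTy is a hypothetical variably-modified type such
  // as 'int[n][m]'): a caller first evaluates the size expressions, then reads
  // back the flattened element count and the element type:
  //
  //   EmitVariablyModifiedType(VlaTy);
  //   std::pair<llvm::Value*, QualType> Size = getVLASize(VlaTy);
  //   llvm::Value *NumElts = Size.first;   // runtime count of 'int' elements
  //   QualType EltTy = Size.second;        // 'int'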
1768
1769  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
1770  /// generating code for a C++ member function.
1771  llvm::Value *LoadCXXThis() {
1772    assert(CXXThisValue && "no 'this' value for this function");
1773    return CXXThisValue;
1774  }
1775
1776  /// LoadCXXVTT - Load the VTT parameter passed to base constructors and
1777  /// destructors of classes with virtual bases.
1778  llvm::Value *LoadCXXVTT() {
1779    assert(CXXVTTValue && "no VTT value for this function");
1780    return CXXVTTValue;
1781  }
1782
1783  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
1784  /// complete class to the given direct base.
1785  llvm::Value *
1786  GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1787                                        const CXXRecordDecl *Derived,
1788                                        const CXXRecordDecl *Base,
1789                                        bool BaseIsVirtual);
1790
1791  /// GetAddressOfBaseClass - This function will add the necessary delta to the
1792  /// load of 'this' and return the address of the base class.
1793  llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1794                                     const CXXRecordDecl *Derived,
1795                                     CastExpr::path_const_iterator PathBegin,
1796                                     CastExpr::path_const_iterator PathEnd,
1797                                     bool NullCheckValue);
1798
1799  llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1800                                        const CXXRecordDecl *Derived,
1801                                        CastExpr::path_const_iterator PathBegin,
1802                                        CastExpr::path_const_iterator PathEnd,
1803                                        bool NullCheckValue);
1804
1805  llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
1806                                         const CXXRecordDecl *ClassDecl,
1807                                         const CXXRecordDecl *BaseClassDecl);
1808
1809  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1810                                      CXXCtorType CtorType,
1811                                      const FunctionArgList &Args);
1812  // It's important not to confuse this and the previous function. Delegating
1813  // constructors are a C++11 feature. The constructor delegate optimization
1814  // is used to reduce duplication in the base and complete constructors where
1815  // they are substantially the same.
1816  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1817                                        const FunctionArgList &Args);
1818  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1819                              bool ForVirtualBase, llvm::Value *This,
1820                              CallExpr::const_arg_iterator ArgBeg,
1821                              CallExpr::const_arg_iterator ArgEnd);
1822
1823  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1824                              llvm::Value *This, llvm::Value *Src,
1825                              CallExpr::const_arg_iterator ArgBeg,
1826                              CallExpr::const_arg_iterator ArgEnd);
1827
1828  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1829                                  const ConstantArrayType *ArrayTy,
1830                                  llvm::Value *ArrayPtr,
1831                                  CallExpr::const_arg_iterator ArgBeg,
1832                                  CallExpr::const_arg_iterator ArgEnd,
1833                                  bool ZeroInitialization = false);
1834
1835  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1836                                  llvm::Value *NumElements,
1837                                  llvm::Value *ArrayPtr,
1838                                  CallExpr::const_arg_iterator ArgBeg,
1839                                  CallExpr::const_arg_iterator ArgEnd,
1840                                  bool ZeroInitialization = false);
1841
1842  static Destroyer destroyCXXObject;
1843
1844  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1845                             bool ForVirtualBase, llvm::Value *This);
1846
1847  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
1848                               llvm::Value *NewPtr, llvm::Value *NumElements);
1849
1850  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
1851                        llvm::Value *Ptr);
1852
1853  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1854  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1855
1856  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1857                      QualType DeleteTy);
1858
1859  llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1860  llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1861  llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);
1862
1863  void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
1864  void EmitStdInitializerListCleanup(llvm::Value *loc,
1865                                     const InitListExpr *init);
1866
1867  /// \brief Situations in which we might emit a check for the suitability of a
1868  ///        pointer or glvalue.
1869  enum TypeCheckKind {
1870    /// Checking the operand of a load. Must be suitably sized and aligned.
1871    TCK_Load,
1872    /// Checking the destination of a store. Must be suitably sized and aligned.
1873    TCK_Store,
1874    /// Checking the bound value in a reference binding. Must be suitably sized
1875    /// and aligned, but is not required to refer to an object (until the
1876    /// reference is used), per core issue 453.
1877    TCK_ReferenceBinding,
1878    /// Checking the object expression in a non-static data member access. Must
1879    /// be an object within its lifetime.
1880    TCK_MemberAccess,
1881    /// Checking the 'this' pointer for a call to a non-static member function.
1882    /// Must be an object within its lifetime.
1883    TCK_MemberCall,
1884    /// Checking the 'this' pointer for a constructor call.
1885    TCK_ConstructorCall
1886  };
1887
1888  /// \brief Emit a check that \p V is the address of storage of the
1889  /// appropriate size and alignment for an object of type \p Type.
1890  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
1891                     QualType Type, CharUnits Alignment = CharUnits::Zero());
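
  // Illustrative sketch (E and Addr are hypothetical): a load emitter might
  // guard the access with the matching check kind before dereferencing:
  //
  //   EmitTypeCheck(TCK_Load, E->getExprLoc(), Addr, E->getType());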
1892
1893  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1894                                       bool isInc, bool isPre);
1895  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1896                                         bool isInc, bool isPre);
1897  //===--------------------------------------------------------------------===//
1898  //                            Declaration Emission
1899  //===--------------------------------------------------------------------===//
1900
1901  /// EmitDecl - Emit a declaration.
1902  ///
1903  /// This function can be called with a null (unreachable) insert point.
1904  void EmitDecl(const Decl &D);
1905
1906  /// EmitVarDecl - Emit a local variable declaration.
1907  ///
1908  /// This function can be called with a null (unreachable) insert point.
1909  void EmitVarDecl(const VarDecl &D);
1910
1911  void EmitScalarInit(const Expr *init, const ValueDecl *D,
1912                      LValue lvalue, bool capturedByInit);
1913  void EmitScalarInit(llvm::Value *init, LValue lvalue);
1914
1915  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1916                             llvm::Value *Address);
1917
1918  /// EmitAutoVarDecl - Emit an auto variable declaration.
1919  ///
1920  /// This function can be called with a null (unreachable) insert point.
1921  void EmitAutoVarDecl(const VarDecl &D);
1922
1923  class AutoVarEmission {
1924    friend class CodeGenFunction;
1925
1926    const VarDecl *Variable;
1927
1928    /// The alignment of the variable.
1929    CharUnits Alignment;
1930
1931    /// The address of the alloca.  Null if the variable was emitted
1932    /// as a global constant.
1933    llvm::Value *Address;
1934
1935    llvm::Value *NRVOFlag;
1936
1937    /// True if the variable is a __block variable.
1938    bool IsByRef;
1939
1940    /// True if the variable is of aggregate type and has a constant
1941    /// initializer.
1942    bool IsConstantAggregate;
1943
1944    struct Invalid {};
1945    AutoVarEmission(Invalid) : Variable(0) {}
1946
1947    AutoVarEmission(const VarDecl &variable)
1948      : Variable(&variable), Address(0), NRVOFlag(0),
1949        IsByRef(false), IsConstantAggregate(false) {}
1950
1951    bool wasEmittedAsGlobal() const { return Address == 0; }
1952
1953  public:
1954    static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
1955
1956    /// Returns the address of the object within this declaration.
1957    /// Note that this does not chase the forwarding pointer for
1958    /// __block decls.
1959    llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
1960      if (!IsByRef) return Address;
1961
1962      return CGF.Builder.CreateStructGEP(Address,
1963                                         CGF.getByRefValueLLVMField(Variable),
1964                                         Variable->getNameAsString());
1965    }
1966  };
1967  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
1968  void EmitAutoVarInit(const AutoVarEmission &emission);
1969  void EmitAutoVarCleanups(const AutoVarEmission &emission);
1970  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1971                              QualType::DestructionKind dtorKind);
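
  // Illustrative sketch (D names a hypothetical VarDecl): EmitAutoVarDecl is
  // essentially the composition of the three phases above; clients that need
  // to interleave extra work (for example, the __block machinery) call the
  // phases separately:
  //
  //   AutoVarEmission emission = EmitAutoVarAlloca(D);
  //   // ... work that must happen between the alloca and the initializer ...
  //   EmitAutoVarInit(emission);
  //   EmitAutoVarCleanups(emission);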
1972
1973  void EmitStaticVarDecl(const VarDecl &D,
1974                         llvm::GlobalValue::LinkageTypes Linkage);
1975
1976  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1977  void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
1978
1979  /// protectFromPeepholes - Protect a value that we're intending to
1980  /// store to the side, but which will probably be used later, from
1981  /// aggressive peepholing optimizations that might delete it.
1982  ///
1983  /// Pass the result to unprotectFromPeepholes to declare that
1984  /// protection is no longer required.
1985  ///
1986  /// There's no particular reason why this shouldn't apply to
1987  /// l-values; it's just that no existing peepholes work on pointers.
1988  PeepholeProtection protectFromPeepholes(RValue rvalue);
1989  void unprotectFromPeepholes(PeepholeProtection protection);
1990
1991  //===--------------------------------------------------------------------===//
1992  //                             Statement Emission
1993  //===--------------------------------------------------------------------===//
1994
1995  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1996  void EmitStopPoint(const Stmt *S);
1997
1998  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1999  /// this function even if there is no current insertion point.
2000  ///
2001  /// This function may clear the current insertion point; callers should use
2002  /// EnsureInsertPoint if they wish to subsequently generate code without first
2003  /// calling EmitBlock, EmitBranch, or EmitStmt.
2004  void EmitStmt(const Stmt *S);
2005
2006  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
2007  /// necessarily require an insertion point or debug information; typically
2008  /// because the statement amounts to a jump or a container of other
2009  /// statements.
2010  ///
2011  /// \return True if the statement was handled.
2012  bool EmitSimpleStmt(const Stmt *S);
2013
2014  RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
2015                          AggValueSlot AVS = AggValueSlot::ignored());
2016
2017  /// EmitLabel - Emit the block for the given label. It is legal to call this
2018  /// function even if there is no current insertion point.
2019  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
2020
2021  void EmitLabelStmt(const LabelStmt &S);
2022  void EmitAttributedStmt(const AttributedStmt &S);
2023  void EmitGotoStmt(const GotoStmt &S);
2024  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
2025  void EmitIfStmt(const IfStmt &S);
2026  void EmitWhileStmt(const WhileStmt &S);
2027  void EmitDoStmt(const DoStmt &S);
2028  void EmitForStmt(const ForStmt &S);
2029  void EmitReturnStmt(const ReturnStmt &S);
2030  void EmitDeclStmt(const DeclStmt &S);
2031  void EmitBreakStmt(const BreakStmt &S);
2032  void EmitContinueStmt(const ContinueStmt &S);
2033  void EmitSwitchStmt(const SwitchStmt &S);
2034  void EmitDefaultStmt(const DefaultStmt &S);
2035  void EmitCaseStmt(const CaseStmt &S);
2036  void EmitCaseStmtRange(const CaseStmt &S);
2037  void EmitAsmStmt(const AsmStmt &S);
2038
2039  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
2040  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
2041  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
2042  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
2043  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
2044
2045  llvm::Constant *getUnwindResumeFn();
2046  llvm::Constant *getUnwindResumeOrRethrowFn();
2047  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2048  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
2049
2050  void EmitCXXTryStmt(const CXXTryStmt &S);
2051  void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
2052
2053  //===--------------------------------------------------------------------===//
2054  //                         LValue Expression Emission
2055  //===--------------------------------------------------------------------===//
2056
2057  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
2058  RValue GetUndefRValue(QualType Ty);
2059
2060  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
2061  /// and issue an ErrorUnsupported style diagnostic (using the
2062  /// provided Name).
2063  RValue EmitUnsupportedRValue(const Expr *E,
2064                               const char *Name);
2065
2066  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
2067  /// an ErrorUnsupported style diagnostic (using the provided Name).
2068  LValue EmitUnsupportedLValue(const Expr *E,
2069                               const char *Name);
2070
2071  /// EmitLValue - Emit code to compute a designator that specifies the location
2072  /// of the expression.
2073  ///
2074  /// This can return one of two things: a simple address or a bitfield
2075  /// reference.  In either case, the LLVM Value* in the LValue structure is
2076  /// guaranteed to be an LLVM pointer type.
2077  ///
2078  /// If this returns a bitfield reference, nothing about the pointee type of
2079  /// the LLVM value is known: For example, it may not be a pointer to an
2080  /// integer.
2081  ///
2082  /// If this returns a normal address, and if the lvalue's C type is fixed
2083  /// size, this method guarantees that the returned pointer type will point to
2084  /// an LLVM type of the same size of the lvalue's type.  If the lvalue has a
2085  /// variable length type, this is not possible.
2086  ///
2087  LValue EmitLValue(const Expr *E);
2088
2089  /// \brief Same as EmitLValue but additionally we generate checking code to
2090  /// guard against undefined behavior.  This is only suitable when we know
2091  /// that the address will be used to access the object.
2092  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
2093
2094  /// EmitToMemory - Change a scalar value from its value
2095  /// representation to its in-memory representation.
2096  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
2097
2098  /// EmitFromMemory - Change a scalar value from its memory
2099  /// representation to its value representation.
2100  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
2101
2102  /// EmitLoadOfScalar - Load a scalar value from an address, taking
2103  /// care to appropriately convert from the memory representation to
2104  /// the LLVM value representation.
2105  llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
2106                                unsigned Alignment, QualType Ty,
2107                                llvm::MDNode *TBAAInfo = 0);
2108
2109  /// EmitLoadOfScalar - Load a scalar value from an address, taking
2110  /// care to appropriately convert from the memory representation to
2111  /// the LLVM value representation.  The l-value must be a simple
2112  /// l-value.
2113  llvm::Value *EmitLoadOfScalar(LValue lvalue);
2114
2115  /// EmitStoreOfScalar - Store a scalar value to an address, taking
2116  /// care to appropriately convert from the LLVM value representation to
2117  /// the memory representation.
2118  void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
2119                         bool Volatile, unsigned Alignment, QualType Ty,
2120                         llvm::MDNode *TBAAInfo = 0, bool isInit=false);
2121
2122  /// EmitStoreOfScalar - Store a scalar value to an address, taking
2123  /// care to appropriately convert from the LLVM value representation to
2124  /// the memory representation.  The l-value must be a simple
2125  /// l-value.  The isInit flag indicates whether this is an initialization.
2126  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
2127  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
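
  // Illustrative sketch (SrcAddr, DstAddr and BoolTy are hypothetical): for a
  // C++ 'bool' the memory representation is typically i8 while the value
  // representation is i1, so a copy through these helpers performs both
  // conversions:
  //
  //   llvm::Value *V = EmitLoadOfScalar(MakeAddrLValue(SrcAddr, BoolTy));
  //   EmitStoreOfScalar(V, MakeAddrLValue(DstAddr, BoolTy));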
2128
2129  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
2130  /// this method emits the address of the lvalue, then loads the result as an
2131  /// rvalue, returning the rvalue.
2132  RValue EmitLoadOfLValue(LValue V);
2133  RValue EmitLoadOfExtVectorElementLValue(LValue V);
2134  RValue EmitLoadOfBitfieldLValue(LValue LV);
2135
2136  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2137  /// lvalue, where both are guaranteed to have the same type, and that type
2138  /// is 'Ty'.
2139  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
2140  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
2141
2142  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as
2143  /// EmitStoreThroughLValue.
2144  ///
2145  /// \param Result [out] - If non-null, this will be set to a Value* for the
2146  /// bit-field contents after the store, appropriate for use as the result of
2147  /// an assignment to the bit-field.
2148  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2149                                      llvm::Value **Result=0);
2150
2151  /// Emit an l-value for an assignment (simple or compound) of complex type.
2152  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
2153  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
2154
2155  // Note: only available for agg return types
2156  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
2157  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
2158  // Note: only available for agg return types
2159  LValue EmitCallExprLValue(const CallExpr *E);
2160  // Note: only available for agg return types
2161  LValue EmitVAArgExprLValue(const VAArgExpr *E);
2162  LValue EmitDeclRefLValue(const DeclRefExpr *E);
2163  LValue EmitStringLiteralLValue(const StringLiteral *E);
2164  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
2165  LValue EmitPredefinedLValue(const PredefinedExpr *E);
2166  LValue EmitUnaryOpLValue(const UnaryOperator *E);
2167  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
2168  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
2169  LValue EmitMemberExpr(const MemberExpr *E);
2170  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
2171  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
2172  LValue EmitInitListLValue(const InitListExpr *E);
2173  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
2174  LValue EmitCastLValue(const CastExpr *E);
2175  LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
2176  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
2177  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
2178
2179  RValue EmitRValueForField(LValue LV, const FieldDecl *FD);
2180
2181  class ConstantEmission {
2182    llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
2183    ConstantEmission(llvm::Constant *C, bool isReference)
2184      : ValueAndIsReference(C, isReference) {}
2185  public:
2186    ConstantEmission() {}
2187    static ConstantEmission forReference(llvm::Constant *C) {
2188      return ConstantEmission(C, true);
2189    }
2190    static ConstantEmission forValue(llvm::Constant *C) {
2191      return ConstantEmission(C, false);
2192    }
2193
2194    operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
2195
2196    bool isReference() const { return ValueAndIsReference.getInt(); }
2197    LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
2198      assert(isReference());
2199      return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
2200                                            refExpr->getType());
2201    }
2202
2203    llvm::Constant *getValue() const {
2204      assert(!isReference());
2205      return ValueAndIsReference.getPointer();
2206    }
2207  };
2208
2209  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
2210
2211  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
2212                                AggValueSlot slot = AggValueSlot::ignored());
2213  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
2214
2215  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2216                              const ObjCIvarDecl *Ivar);
2217  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
2218
2219  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
2220  /// if the Field is a reference, this will return the address of the reference
2221  /// and not the address of the value stored in the reference.
2222  LValue EmitLValueForFieldInitialization(LValue Base,
2223                                          const FieldDecl* Field);
2224
2225  LValue EmitLValueForIvar(QualType ObjectTy,
2226                           llvm::Value* Base, const ObjCIvarDecl *Ivar,
2227                           unsigned CVRQualifiers);
2228
2229  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
2230  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
2231  LValue EmitLambdaLValue(const LambdaExpr *E);
2232  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
2233  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
2234
2235  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
2236  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
2237  LValue EmitStmtExprLValue(const StmtExpr *E);
2238  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
2239  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
2240  void   EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
2241
2242  //===--------------------------------------------------------------------===//
2243  //                         Scalar Expression Emission
2244  //===--------------------------------------------------------------------===//
2245
2246  /// EmitCall - Generate a call of the given function, expecting the given
2247  /// result type, and using the given argument list which specifies both the
2248  /// LLVM arguments and the types they were derived from.
2249  ///
2250  /// \param TargetDecl - If given, the decl of the function in a direct call;
2251  /// used to set attributes on the call (noreturn, etc.).
2252  RValue EmitCall(const CGFunctionInfo &FnInfo,
2253                  llvm::Value *Callee,
2254                  ReturnValueSlot ReturnValue,
2255                  const CallArgList &Args,
2256                  const Decl *TargetDecl = 0,
2257                  llvm::Instruction **callOrInvoke = 0);
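
  // Illustrative sketch (FnInfo, Callee, Args and FD are hypothetical): a
  // direct call whose arguments have already been collected into a CallArgList
  // might be emitted as:
  //
  //   RValue RV = EmitCall(FnInfo, Callee, ReturnValueSlot(), Args, FD);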
2258
2259  RValue EmitCall(QualType FnType, llvm::Value *Callee,
2260                  ReturnValueSlot ReturnValue,
2261                  CallExpr::const_arg_iterator ArgBeg,
2262                  CallExpr::const_arg_iterator ArgEnd,
2263                  const Decl *TargetDecl = 0);
2264  RValue EmitCallExpr(const CallExpr *E,
2265                      ReturnValueSlot ReturnValue = ReturnValueSlot());
2266
2267  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2268                                  ArrayRef<llvm::Value *> Args,
2269                                  const Twine &Name = "");
2270  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2271                                  const Twine &Name = "");
2272
2273  llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
2274                                llvm::Type *Ty);
2275  llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
2276                                llvm::Value *This, llvm::Type *Ty);
2277  llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
2278                                         NestedNameSpecifier *Qual,
2279                                         llvm::Type *Ty);
2280
2281  llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
2282                                                   CXXDtorType Type,
2283                                                   const CXXRecordDecl *RD);
2284
2285  RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
2286                           SourceLocation CallLoc,
2287                           llvm::Value *Callee,
2288                           ReturnValueSlot ReturnValue,
2289                           llvm::Value *This,
2290                           llvm::Value *VTT,
2291                           CallExpr::const_arg_iterator ArgBeg,
2292                           CallExpr::const_arg_iterator ArgEnd);
2293  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
2294                               ReturnValueSlot ReturnValue);
2295  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
2296                                      ReturnValueSlot ReturnValue);
2297
2298  llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2299                                           const CXXMethodDecl *MD,
2300                                           llvm::Value *This);
2301  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
2302                                       const CXXMethodDecl *MD,
2303                                       ReturnValueSlot ReturnValue);
2304
2305  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
2306                                ReturnValueSlot ReturnValue);
2307
2308
2309  RValue EmitBuiltinExpr(const FunctionDecl *FD,
2310                         unsigned BuiltinID, const CallExpr *E);
2311
2312  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
2313
2314  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
2315  /// is unhandled by the current target.
2316  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2317
2318  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2319  llvm::Value *EmitNeonCall(llvm::Function *F,
2320                            SmallVectorImpl<llvm::Value*> &O,
2321                            const char *name,
2322                            unsigned shift = 0, bool rightshift = false);
2323  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
2324  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
2325                                   bool negateForRightShift);
2326
2327  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
2328  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2329  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2330
2331  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
2332  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
2333  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
2334  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
2335  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
2336  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
2337                                const ObjCMethodDecl *MethodWithObjects);
2338  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
2339  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
2340                             ReturnValueSlot Return = ReturnValueSlot());
2341
2342  /// Retrieves the default cleanup kind for an ARC cleanup.
2343  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
2344  CleanupKind getARCCleanupKind() {
2345    return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
2346             ? NormalAndEHCleanup : NormalCleanup;
2347  }
2348
2349  // ARC primitives.
2350  void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
2351  void EmitARCDestroyWeak(llvm::Value *addr);
2352  llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
2353  llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
2354  llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
2355                                bool ignored);
2356  void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
2357  void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
2358  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
2359  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
2360  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
2361                                  bool ignored);
2362  llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
2363                                      bool ignored);
2364  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
2365  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
2366  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
2367  void EmitARCDestroyStrong(llvm::Value *addr, bool precise);
2368  void EmitARCRelease(llvm::Value *value, bool precise);
2369  llvm::Value *EmitARCAutorelease(llvm::Value *value);
2370  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
2371  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
2372  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
2373
2374  std::pair<LValue,llvm::Value*>
2375  EmitARCStoreAutoreleasing(const BinaryOperator *e);
2376  std::pair<LValue,llvm::Value*>
2377  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
2378
2379  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
2380
2381  llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
2382  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
2383  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
2384
2385  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
2386  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
2387  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
2388
2389  static Destroyer destroyARCStrongImprecise;
2390  static Destroyer destroyARCStrongPrecise;
2391  static Destroyer destroyARCWeak;
2392
2393  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
2394  llvm::Value *EmitObjCAutoreleasePoolPush();
2395  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
2396  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
2397  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
2398
2399  /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
2400  /// expression. Will emit a temporary variable if E is not an LValue.
2401  RValue EmitReferenceBindingToExpr(const Expr* E,
2402                                    const NamedDecl *InitializedDecl);
2403
2404  //===--------------------------------------------------------------------===//
2405  //                           Expression Emission
2406  //===--------------------------------------------------------------------===//
2407
2408  // Expressions are broken into three classes: scalar, complex, aggregate.
2409
2410  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
2411  /// scalar type, returning the result.
2412  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
2413
2414  /// EmitScalarConversion - Emit a conversion from the specified type to the
2415  /// specified destination type, both of which are LLVM scalar types.
2416  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
2417                                    QualType DstTy);
2418
2419  /// EmitComplexToScalarConversion - Emit a conversion from the specified
2420  /// complex type to the specified destination type, where the destination type
2421  /// is an LLVM scalar type.
2422  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
2423                                             QualType DstTy);
2424
2425
2426  /// EmitAggExpr - Emit the computation of the specified expression
2427  /// of aggregate type.  The result is computed into the given slot,
2428  /// which may be null to indicate that the value is not needed.
2429  void EmitAggExpr(const Expr *E, AggValueSlot AS);
2430
2431  /// EmitAggExprToLValue - Emit the computation of the specified expression of
2432  /// aggregate type into a temporary LValue.
2433  LValue EmitAggExprToLValue(const Expr *E);
2434
2435  /// EmitGCMemmoveCollectable - Emit special API for structs with object
2436  /// pointers.
2437  void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
2438                                QualType Ty);
2439
2440  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2441  /// make sure it survives garbage collection until this point.
2442  void EmitExtendGCLifetime(llvm::Value *object);
2443
2444  /// EmitComplexExpr - Emit the computation of the specified expression of
2445  /// complex type, returning the result.
2446  ComplexPairTy EmitComplexExpr(const Expr *E,
2447                                bool IgnoreReal = false,
2448                                bool IgnoreImag = false);
2449
2450  /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
2451  /// of complex type, storing into the specified Value*.
2452  void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
2453                               bool DestIsVolatile);
2454
2455  /// StoreComplexToAddr - Store a complex number into the specified address.
2456  void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
2457                          bool DestIsVolatile);
2458  /// LoadComplexFromAddr - Load a complex number from the specified address.
2459  ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
2460
2461  /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
2462  /// a static local variable.
2463  llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
2464                                            const char *Separator,
2465                                       llvm::GlobalValue::LinkageTypes Linkage);
2466
2467  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
2468  /// global variable that has already been created for it.  If the initializer
2469  /// has a different type than GV does, this may free GV and return a different
2470  /// one.  Otherwise it just returns GV.
2471  llvm::GlobalVariable *
2472  AddInitializerToStaticVarDecl(const VarDecl &D,
2473                                llvm::GlobalVariable *GV);
2474
2475
2476  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
2477  /// variable with global storage.
2478  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
2479                                bool PerformInit);
2480
2481  /// Call atexit() with a function that passes the given argument to
2482  /// the given function.
2483  void registerGlobalDtorWithAtExit(llvm::Constant *fn, llvm::Constant *addr);
2484
2485  /// Emit code in this function to perform a guarded variable
2486  /// initialization.  Guarded initializations are used when it's not
2487  /// possible to prove that an initialization will be done exactly
2488  /// once, e.g. with a static local variable or a static data member
2489  /// of a class template.
2490  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
2491                          bool PerformInit);
2492
2493  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
2494  /// variables.
2495  void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
2496                                 llvm::Constant **Decls,
2497                                 unsigned NumDecls);
2498
2499  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
2500  /// variables.
2501  void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
2502                                  const std::vector<std::pair<llvm::WeakVH,
2503                                  llvm::Constant*> > &DtorsAndObjects);
2504
2505  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
2506                                        const VarDecl *D,
2507                                        llvm::GlobalVariable *Addr,
2508                                        bool PerformInit);
2509
2510  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
2511
2512  void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
2513                                  const Expr *Exp);
2514
2515  void enterFullExpression(const ExprWithCleanups *E) {
2516    if (E->getNumObjects() == 0) return;
2517    enterNonTrivialFullExpression(E);
2518  }
2519  void enterNonTrivialFullExpression(const ExprWithCleanups *E);
2520
2521  void EmitCXXThrowExpr(const CXXThrowExpr *E);
2522
2523  void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
2524
2525  RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
2526
2527  //===--------------------------------------------------------------------===//
2528  //                         Annotations Emission
2529  //===--------------------------------------------------------------------===//
2530
2531  /// Emit an annotation call (intrinsic or builtin).
2532  llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
2533                                  llvm::Value *AnnotatedVal,
2534                                  StringRef AnnotationStr,
2535                                  SourceLocation Location);
2536
2537  /// Emit local annotations for the local variable V, declared by D.
2538  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
2539
2540  /// Emit field annotations for the given field & value. Returns the
2541  /// annotation result.
2542  llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
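
  // For context, these hooks are driven by the source-level 'annotate'
  // attribute.  A minimal illustrative example (the annotation strings are
  // arbitrary) that exercises EmitVarAnnotations and EmitFieldAnnotations:
  //
  //   void f() {
  //     int x __attribute__((annotate("var_note"))) = 0;
  //   }
  //   struct S {
  //     int field __attribute__((annotate("field_note")));
  //   };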
2543
2544  //===--------------------------------------------------------------------===//
2545  //                             Internal Helpers
2546  //===--------------------------------------------------------------------===//
2547
2548  /// ContainsLabel - Return true if the statement contains a label in it.  If
2549  /// this statement is not executed normally, and it does not contain a label,
2550  /// then we can just remove the code.
2551  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
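
  // Why labels matter here (illustrative C, not part of this interface): the
  // 'else' arm below is statically dead, but it cannot simply be dropped,
  // because 'target:' is still reachable via the goto from live code.
  //
  //   if (1) {
  //     goto target;
  //   } else {
  //   target:              // ContainsLabel() returns true for this arm
  //     do_something();
  //   }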
2552
2553  /// containsBreak - Return true if the statement contains a break out of it.
2554  /// If the statement (recursively) contains a switch or loop with a break
2555  /// inside of it, this is fine.
2556  static bool containsBreak(const Stmt *S);
2557
2558  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2559  /// to a constant, or if it does but contains a label, return false.  If it
2560  /// constant folds, return true and set the boolean result in Result.
2561  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
2562
2563  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2564  /// to a constant, or if it does but contains a label, return false.  If it
2565  /// constant folds, return true and set the folded value.
2566  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result);
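
  // Hedged usage sketch: callers typically try to fold a condition before
  // emitting control flow ('Cond' here is a hypothetical condition
  // expression):
  //
  //   bool CondVal;
  //   if (ConstantFoldsToSimpleInteger(Cond, CondVal)) {
  //     // Emit only the statically taken arm (unless the dead arm contains
  //     // a label; see ContainsLabel above).
  //   } else {
  //     // Fall back to a real branch, e.g. via EmitBranchOnBoolExpr below.
  //   }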
2567
2568  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
2569  /// if statement) to the specified blocks.  Based on the condition, this might
2570  /// try to simplify the codegen of the conditional based on the branch.
2571  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
2572                            llvm::BasicBlock *FalseBlock);
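
  // Minimal usage sketch, assuming the createBasicBlock/EmitBlock helpers
  // declared elsewhere in this class ('Cond' is a hypothetical condition);
  // roughly how an 'if' with no 'else' could be lowered:
  //
  //   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  //   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  //   EmitBranchOnBoolExpr(Cond, ThenBlock, ContBlock);
  //   EmitBlock(ThenBlock);
  //   ... emit the 'then' body ...
  //   EmitBlock(ContBlock);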
2573
2574  /// \brief Emit a description of a type in a format suitable for passing to
2575  /// a runtime sanitizer handler.
2576  llvm::Constant *EmitCheckTypeDescriptor(QualType T);
2577
2578  /// \brief Convert a value into a format suitable for passing to a runtime
2579  /// sanitizer handler.
2580  llvm::Value *EmitCheckValue(llvm::Value *V);
2581
2582  /// \brief Emit a description of a source location in a format suitable for
2583  /// passing to a runtime sanitizer handler.
2584  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
2585
2586  /// \brief Specify under what conditions this check can be recovered.
2587  enum CheckRecoverableKind {
2588    /// Always terminate program execution if this check fails.
2589    CRK_Unrecoverable,
2590    /// Check supports recovering; whether a failure aborts is up to the user.
2591    CRK_Recoverable,
2592    /// The runtime conditionally aborts; recovery must always be supported.
2593    CRK_AlwaysRecoverable
2594  };
2595
2596  /// \brief Create a basic block that will call a handler function in a
2597  /// sanitizer runtime with the provided arguments, and create a conditional
2598  /// branch to it.
2599  void EmitCheck(llvm::Value *Checked, StringRef CheckName,
2600                 ArrayRef<llvm::Constant *> StaticArgs,
2601                 ArrayRef<llvm::Value *> DynamicArgs,
2602                 CheckRecoverableKind Recoverable);
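
  // Hedged sketch of how the pieces above fit together; the check name,
  // recoverability, and the locals 'Checked', 'Loc', 'Ty', and 'Val' are
  // illustrative only:
  //
  //   llvm::Value *Checked = ...;   // i1, true when the check passes
  //   llvm::Constant *StaticArgs[] = {
  //     EmitCheckSourceLocation(Loc),
  //     EmitCheckTypeDescriptor(Ty)
  //   };
  //   EmitCheck(Checked, "example_check", StaticArgs,
  //             EmitCheckValue(Val), CRK_Recoverable);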
2603
2604  /// \brief Create a basic block that will call the trap intrinsic, and emit a
2605  /// conditional branch to it, for the -ftrapv checks.
2606  void EmitTrapvCheck(llvm::Value *Checked);
2607
2608  /// EmitCallArg - Emit a single call argument.
2609  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
2610
2611  /// EmitDelegateCallArg - We are performing a delegate call; that
2612  /// is, the current function is delegating to another one.  Produce
2613  /// an r-value suitable for passing the given parameter.
2614  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
2615
2616  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
2617  /// point operation, expressed as the maximum relative error in ulp.
2618  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
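
  // Illustrative note: this attaches LLVM 'fpmath' metadata to the
  // instruction.  For example, SetFPAccuracy(Div, 2.5f) on an fdiv yields IR
  // along the lines of
  //
  //   %div = fdiv float %a, %b, !fpmath !0
  //   !0 = metadata !{float 2.500000e+00}
  //
  // (2.5 ulp is just an example figure, e.g. as some OpenCL profiles permit
  // for single-precision division).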
2619
2620private:
2621  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
2622  void EmitReturnOfRValue(RValue RV, QualType Ty);
2623
2624  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
2625  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
2626  ///
2627  /// \param AI - The first function argument of the expansion.
2628  /// \return The argument following the last expanded function
2629  /// argument.
2630  llvm::Function::arg_iterator
2631  ExpandTypeFromArgs(QualType Ty, LValue Dst,
2632                     llvm::Function::arg_iterator AI);
2633
2634  /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
2635  /// Ty, into individual arguments on the provided vector \arg Args. See
2636  /// ABIArgInfo::Expand.
2637  void ExpandTypeToArgs(QualType Ty, RValue Src,
2638                        SmallVector<llvm::Value*, 16> &Args,
2639                        llvm::FunctionType *IRFuncTy);
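
  // Illustrative example of ABIArgInfo::Expand with a hypothetical type: a
  // parameter of type
  //
  //   struct S { int a; float b; };
  //
  // marked for expansion is passed as two separate IR arguments (roughly
  // 'i32, float') instead of one aggregate.  ExpandTypeFromArgs reassembles
  // such a value from the expanded arguments in the prologue, and
  // ExpandTypeToArgs flattens an RValue into them at a call site.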
2640
2641  llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2642                            const Expr *InputExpr, std::string &ConstraintStr);
2643
2644  llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
2645                                  LValue InputValue, QualType InputType,
2646                                  std::string &ConstraintStr);
2647
2648  /// EmitCallArgs - Emit call arguments for a function.
2649  /// The CallArgTypeInfo parameter is used for iterating over the known
2650  /// argument types of the function being called.
2651  template<typename T>
2652  void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
2653                    CallExpr::const_arg_iterator ArgBeg,
2654                    CallExpr::const_arg_iterator ArgEnd) {
2655    CallExpr::const_arg_iterator Arg = ArgBeg;
2656
2657    // First, use the argument types that the type info knows about
2658    if (CallArgTypeInfo) {
2659      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
2660           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
2661        assert(Arg != ArgEnd && "Running over edge of argument list!");
2662        QualType ArgType = *I;
2663#ifndef NDEBUG
2664        QualType ActualArgType = Arg->getType();
2665        if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
2666          QualType ActualBaseType =
2667            ActualArgType->getAs<PointerType>()->getPointeeType();
2668          QualType ArgBaseType =
2669            ArgType->getAs<PointerType>()->getPointeeType();
2670          if (ArgBaseType->isVariableArrayType()) {
2671            if (const VariableArrayType *VAT =
2672                getContext().getAsVariableArrayType(ActualBaseType)) {
2673              if (!VAT->getSizeExpr())
2674                ActualArgType = ArgType;
2675            }
2676          }
2677        }
2678        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
2679               getTypePtr() ==
2680               getContext().getCanonicalType(ActualArgType).getTypePtr() &&
2681               "type mismatch in call argument!");
2682#endif
2683        EmitCallArg(Args, *Arg, ArgType);
2684      }
2685
2686      // Either we've emitted all the call args, or we have a call to a
2687      // variadic function.
2688      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
2689             "Extra arguments in non-variadic function!");
2690
2691    }
2692
2693    // If we still have any arguments, emit them using the type of the argument.
2694    for (; Arg != ArgEnd; ++Arg)
2695      EmitCallArg(Args, *Arg, Arg->getType());
2696  }
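
  // Hedged usage sketch: for a direct call whose callee has a prototype, the
  // known parameter types come from the FunctionProtoType ('FPT' and 'CE'
  // are hypothetical locals):
  //
  //   const FunctionProtoType *FPT = ...;
  //   const CallExpr *CE = ...;
  //   CallArgList Args;
  //   EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());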
2697
2698  const TargetCodeGenInfo &getTargetHooks() const {
2699    return CGM.getTargetCodeGenInfo();
2700  }
2701
2702  void EmitDeclMetadata();
2703
2704  CodeGenModule::ByrefHelpers *
2705  buildByrefHelpers(llvm::StructType &byrefType,
2706                    const AutoVarEmission &emission);
2707
2708  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
2709
2710  /// EmitPointerWithAlignment - Given an expression with a pointer type, emit
2711  /// the value and compute our best estimate of the alignment of the pointee.
2712  std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
2713};
2714
2715/// Helper class with most of the code for saving a value for a
2716/// conditional expression cleanup.
2717struct DominatingLLVMValue {
2718  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
2719
2720  /// Answer whether the given value needs extra work to be saved.
2721  static bool needsSaving(llvm::Value *value) {
2722    // If it's not an instruction, we don't need to save.
2723    if (!isa<llvm::Instruction>(value)) return false;
2724
2725    // If it's an instruction in the entry block, we don't need to save.
2726    llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
2727    return (block != &block->getParent()->getEntryBlock());
2728  }
2729
2730  /// Try to save the given value.
2731  static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
2732    if (!needsSaving(value)) return saved_type(value, false);
2733
2734    // Otherwise we need an alloca.
2735    llvm::Value *alloca =
2736      CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
2737    CGF.Builder.CreateStore(value, alloca);
2738
2739    return saved_type(alloca, true);
2740  }
2741
2742  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
2743    if (!value.getInt()) return value.getPointer();
2744    return CGF.Builder.CreateLoad(value.getPointer());
2745  }
2746};
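
// Hedged usage sketch (the locals are illustrative): code that must reuse a
// value inside a conditionally-executed cleanup spills it here and reloads it
// when the cleanup actually runs:
//
//   DominatingLLVMValue::saved_type Saved =
//       DominatingLLVMValue::save(CGF, Ptr);
//   ...
//   // later, when the cleanup runs:
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);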
2747
2748/// A partial specialization of DominatingPointer for pointer types whose
2749/// values might be llvm::Instructions.
2750template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
2751  typedef T *type;
2752  static type restore(CodeGenFunction &CGF, saved_type value) {
2753    return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
2754  }
2755};
2756
2757/// A specialization of DominatingValue for RValue.
2758template <> struct DominatingValue<RValue> {
2759  typedef RValue type;
2760  class saved_type {
2761    enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
2762                AggregateAddress, ComplexAddress };
2763
2764    llvm::Value *Value;
2765    Kind K;
2766    saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
2767
2768  public:
2769    static bool needsSaving(RValue value);
2770    static saved_type save(CodeGenFunction &CGF, RValue value);
2771    RValue restore(CodeGenFunction &CGF);
2772
2773    // implementations in CGExprCXX.cpp
2774  };
2775
2776  static bool needsSaving(type value) {
2777    return saved_type::needsSaving(value);
2778  }
2779  static saved_type save(CodeGenFunction &CGF, type value) {
2780    return saved_type::save(CGF, value);
2781  }
2782  static type restore(CodeGenFunction &CGF, saved_type value) {
2783    return value.restore(CGF);
2784  }
2785};
2786
2787}  // end namespace CodeGen
2788}  // end namespace clang
2789
2790#endif
2791