CodeGenFunction.h revision 21f6ed94b929beea31622f5e6b3890e51293cfad
1//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This is the internal per-function state used for llvm translation.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15#define CLANG_CODEGEN_CODEGENFUNCTION_H
16
17#include "clang/AST/Type.h"
18#include "clang/AST/ExprCXX.h"
19#include "clang/AST/ExprObjC.h"
20#include "clang/AST/CharUnits.h"
21#include "clang/Frontend/CodeGenOptions.h"
22#include "clang/Basic/ABI.h"
23#include "clang/Basic/TargetInfo.h"
24#include "llvm/ADT/ArrayRef.h"
25#include "llvm/ADT/DenseMap.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/Support/ValueHandle.h"
28#include "llvm/Support/Debug.h"
29#include "CodeGenModule.h"
30#include "CGBuilder.h"
31#include "CGDebugInfo.h"
32#include "CGValue.h"
33
34namespace llvm {
35  class BasicBlock;
36  class LLVMContext;
37  class MDNode;
38  class Module;
39  class SwitchInst;
40  class Twine;
41  class Value;
42  class CallSite;
43}
44
45namespace clang {
46  class ASTContext;
47  class BlockDecl;
48  class CXXDestructorDecl;
49  class CXXForRangeStmt;
50  class CXXTryStmt;
51  class Decl;
52  class LabelDecl;
53  class EnumConstantDecl;
54  class FunctionDecl;
55  class FunctionProtoType;
56  class LabelStmt;
57  class ObjCContainerDecl;
58  class ObjCInterfaceDecl;
59  class ObjCIvarDecl;
60  class ObjCMethodDecl;
61  class ObjCImplementationDecl;
62  class ObjCPropertyImplDecl;
63  class TargetInfo;
64  class TargetCodeGenInfo;
65  class VarDecl;
66  class ObjCForCollectionStmt;
67  class ObjCAtTryStmt;
68  class ObjCAtThrowStmt;
69  class ObjCAtSynchronizedStmt;
70  class ObjCAutoreleasePoolStmt;
71
72namespace CodeGen {
73  class CodeGenTypes;
74  class CGFunctionInfo;
75  class CGRecordLayout;
76  class CGBlockInfo;
77  class CGCXXABI;
78  class BlockFlags;
79  class BlockFieldFlags;
80
81/// A branch fixup.  These are required when emitting a goto to a
82/// label which hasn't been emitted yet.  The goto is optimistically
83/// emitted as a branch to the basic block for the label, and (if it
84/// occurs in a scope with non-trivial cleanups) a fixup is added to
85/// the innermost cleanup.  When a (normal) cleanup is popped, any
86/// unresolved fixups in that scope are threaded through the cleanup.
87struct BranchFixup {
88  /// The block containing the terminator which needs to be modified
89  /// into a switch if this fixup is resolved into the current scope.
90  /// If null, InitialBranch points directly to the destination.
91  llvm::BasicBlock *OptimisticBranchBlock;
92
93  /// The ultimate destination of the branch.
94  ///
95  /// This can be set to null to indicate that this fixup was
96  /// successfully resolved.
97  llvm::BasicBlock *Destination;
98
99  /// The destination index value.
100  unsigned DestinationIndex;
101
102  /// The initial branch of the fixup.
103  llvm::BranchInst *InitialBranch;
104};
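// For illustration (source-language example; not part of this header), a
// function that requires a branch fixup: the goto is emitted before the
// 'done' label exists, and when A's destructor cleanup is popped the
// unresolved fixup is threaded through it:
//
//     struct A { ~A(); };
//     void use(A&);
//     void f(bool b) {
//       {
//         A a;
//         if (b) goto done;   // 'done' not yet emitted: fixup recorded
//         use(a);
//       }
//      done: ;
//     }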
105
106template <class T> struct InvariantValue {
107  typedef T type;
108  typedef T saved_type;
109  static bool needsSaving(type value) { return false; }
110  static saved_type save(CodeGenFunction &CGF, type value) { return value; }
111  static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
112};
113
114/// A metaprogramming class for ensuring that a value will dominate an
115/// arbitrary position in a function.
116template <class T> struct DominatingValue : InvariantValue<T> {};
117
118template <class T, bool mightBeInstruction =
119            llvm::is_base_of<llvm::Value, T>::value &&
120            !llvm::is_base_of<llvm::Constant, T>::value &&
121            !llvm::is_base_of<llvm::BasicBlock, T>::value>
122struct DominatingPointer;
123template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
124// template <class T> struct DominatingPointer<T,true> at end of file
125
126template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
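// For illustration (hypothetical specialization; not part of this header):
// a type whose values may be instructions can provide a DominatingValue
// specialization with the same interface as InvariantValue above, typically
// saving by spilling the value to a stack slot that dominates any later use:
//
//     template <> struct DominatingValue<SomeHandle> {
//       typedef SomeHandle type;
//       struct saved_type { llvm::Value *SpillSlot; };
//       static bool needsSaving(type value);
//       static saved_type save(CodeGenFunction &CGF, type value);
//       static type restore(CodeGenFunction &CGF, saved_type value);
//     };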
127
128enum CleanupKind {
129  EHCleanup = 0x1,
130  NormalCleanup = 0x2,
131  NormalAndEHCleanup = EHCleanup | NormalCleanup,
132
133  InactiveCleanup = 0x4,
134  InactiveEHCleanup = EHCleanup | InactiveCleanup,
135  InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
136  InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
137};
138
139/// A stack of scopes which respond to exceptions, including cleanups
140/// and catch blocks.
141class EHScopeStack {
142public:
143  /// A saved depth on the scope stack.  This is necessary because
144  /// pushing scopes onto the stack invalidates iterators.
145  class stable_iterator {
146    friend class EHScopeStack;
147
148    /// Offset from StartOfData to EndOfBuffer.
149    ptrdiff_t Size;
150
151    stable_iterator(ptrdiff_t Size) : Size(Size) {}
152
153  public:
154    static stable_iterator invalid() { return stable_iterator(-1); }
155    stable_iterator() : Size(-1) {}
156
157    bool isValid() const { return Size >= 0; }
158
159    /// Returns true if this scope encloses I.
160    /// Returns false if I is invalid.
161    /// This scope must be valid.
162    bool encloses(stable_iterator I) const { return Size <= I.Size; }
163
164    /// Returns true if this scope strictly encloses I: that is,
165    /// if it encloses I and is not I.
166    /// Returns false if I is invalid.
167    /// This scope must be valid.
168    bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
169
170    friend bool operator==(stable_iterator A, stable_iterator B) {
171      return A.Size == B.Size;
172    }
173    friend bool operator!=(stable_iterator A, stable_iterator B) {
174      return A.Size != B.Size;
175    }
176  };
177
178  /// Information for lazily generating a cleanup.  Subclasses must be
179  /// POD-like: cleanups will not be destructed, and they will be
180  /// allocated on the cleanup stack and freely copied and moved
181  /// around.
182  ///
183  /// Cleanup implementations should generally be declared in an
184  /// anonymous namespace.
185  class Cleanup {
186    // Anchor the construction vtable.
187    virtual void anchor();
188  public:
189    /// Generation flags.
190    class Flags {
191      enum {
192        F_IsForEH             = 0x1,
193        F_IsNormalCleanupKind = 0x2,
194        F_IsEHCleanupKind     = 0x4
195      };
196      unsigned flags;
197
198    public:
199      Flags() : flags(0) {}
200
201      /// isForEHCleanup - true if the current emission is for an EH cleanup.
202      bool isForEHCleanup() const { return flags & F_IsForEH; }
203      bool isForNormalCleanup() const { return !isForEHCleanup(); }
204      void setIsForEHCleanup() { flags |= F_IsForEH; }
205
206      bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
207      void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
208
209      /// isEHCleanupKind - true if the cleanup was pushed as an EH
210      /// cleanup.
211      bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
212      void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
213    };
214
215    // Provide a virtual destructor to suppress a very common warning
216    // that unfortunately cannot be suppressed without this.  Cleanups
217    // should not rely on this destructor ever being called.
218    virtual ~Cleanup() {}
219
220    /// Emit the cleanup.  For normal cleanups, this is run in the
221    /// same EH context as when the cleanup was pushed, i.e. the
222    /// immediately-enclosing context of the cleanup scope.  For
223    /// EH cleanups, this is run in a terminate context.
224    ///
225    /// \param flags Generation flags; flags.isForEHCleanup() is true if
226    ///  this emission is for an EH cleanup and false for a normal cleanup.
227    virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
228  };
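  // For illustration only (hypothetical cleanup; 'fn' and 'arg' are
  // placeholders, and nothing here is declared by this header): a minimal
  // Cleanup that calls a previously computed function on a saved value,
  // declared in an anonymous namespace as recommended above and pushed with
  // pushCleanup (defined below):
  //
  //     namespace {
  //       struct CallReleaseFn : EHScopeStack::Cleanup {
  //         llvm::Value *Fn, *Arg;
  //         CallReleaseFn(llvm::Value *fn, llvm::Value *arg)
  //           : Fn(fn), Arg(arg) {}
  //         void Emit(CodeGenFunction &CGF, Flags flags) {
  //           CGF.Builder.CreateCall(Fn, Arg);
  //         }
  //       };
  //     }
  //
  //     // At the point the resource is acquired:
  //     CGF.EHStack.pushCleanup<CallReleaseFn>(NormalAndEHCleanup, fn, arg);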
229
230  /// ConditionalCleanupN stores the saved form of its N parameters,
231  /// then restores them and performs the cleanup.
232  template <class T, class A0>
233  class ConditionalCleanup1 : public Cleanup {
234    typedef typename DominatingValue<A0>::saved_type A0_saved;
235    A0_saved a0_saved;
236
237    void Emit(CodeGenFunction &CGF, Flags flags) {
238      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
239      T(a0).Emit(CGF, flags);
240    }
241
242  public:
243    ConditionalCleanup1(A0_saved a0)
244      : a0_saved(a0) {}
245  };
246
247  template <class T, class A0, class A1>
248  class ConditionalCleanup2 : public Cleanup {
249    typedef typename DominatingValue<A0>::saved_type A0_saved;
250    typedef typename DominatingValue<A1>::saved_type A1_saved;
251    A0_saved a0_saved;
252    A1_saved a1_saved;
253
254    void Emit(CodeGenFunction &CGF, Flags flags) {
255      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
256      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
257      T(a0, a1).Emit(CGF, flags);
258    }
259
260  public:
261    ConditionalCleanup2(A0_saved a0, A1_saved a1)
262      : a0_saved(a0), a1_saved(a1) {}
263  };
264
265  template <class T, class A0, class A1, class A2>
266  class ConditionalCleanup3 : public Cleanup {
267    typedef typename DominatingValue<A0>::saved_type A0_saved;
268    typedef typename DominatingValue<A1>::saved_type A1_saved;
269    typedef typename DominatingValue<A2>::saved_type A2_saved;
270    A0_saved a0_saved;
271    A1_saved a1_saved;
272    A2_saved a2_saved;
273
274    void Emit(CodeGenFunction &CGF, Flags flags) {
275      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
276      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
277      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
278      T(a0, a1, a2).Emit(CGF, flags);
279    }
280
281  public:
282    ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
283      : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
284  };
285
286  template <class T, class A0, class A1, class A2, class A3>
287  class ConditionalCleanup4 : public Cleanup {
288    typedef typename DominatingValue<A0>::saved_type A0_saved;
289    typedef typename DominatingValue<A1>::saved_type A1_saved;
290    typedef typename DominatingValue<A2>::saved_type A2_saved;
291    typedef typename DominatingValue<A3>::saved_type A3_saved;
292    A0_saved a0_saved;
293    A1_saved a1_saved;
294    A2_saved a2_saved;
295    A3_saved a3_saved;
296
297    void Emit(CodeGenFunction &CGF, Flags flags) {
298      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
299      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
300      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
301      A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
302      T(a0, a1, a2, a3).Emit(CGF, flags);
303    }
304
305  public:
306    ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
307      : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
308  };
309
310private:
311  // The implementation for this class is in CGCleanup.h and
312  // CGCleanup.cpp; the definition is here because it's used as a
313  // member of CodeGenFunction.
314
315  /// The start of the scope-stack buffer, i.e. the allocated pointer
316  /// for the buffer.  All of these pointers are either simultaneously
317  /// null or simultaneously valid.
318  char *StartOfBuffer;
319
320  /// The end of the buffer.
321  char *EndOfBuffer;
322
323  /// The first valid entry in the buffer.
324  char *StartOfData;
325
326  /// The innermost normal cleanup on the stack.
327  stable_iterator InnermostNormalCleanup;
328
329  /// The innermost EH scope on the stack.
330  stable_iterator InnermostEHScope;
331
332  /// The current set of branch fixups.  A branch fixup is a jump to
333  /// an as-yet unemitted label, i.e. a label for which we don't yet
334  /// know the EH stack depth.  Whenever we pop a cleanup, we have
335  /// to thread all the current branch fixups through it.
336  ///
337  /// Fixups are recorded as the Use of the respective branch or
338  /// switch statement.  The use points to the final destination.
339  /// When popping out of a cleanup, these uses are threaded through
340  /// the cleanup and adjusted to point to the new cleanup.
341  ///
342  /// Note that branches are allowed to jump into protected scopes
343  /// in certain situations;  e.g. the following code is legal:
344  ///     struct A { ~A(); }; // trivial ctor, non-trivial dtor
345  ///     goto foo;
346  ///     A a;
347  ///    foo:
348  ///     bar();
349  SmallVector<BranchFixup, 8> BranchFixups;
350
351  char *allocate(size_t Size);
352
353  void *pushCleanup(CleanupKind K, size_t DataSize);
354
355public:
356  EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
357                   InnermostNormalCleanup(stable_end()),
358                   InnermostEHScope(stable_end()) {}
359  ~EHScopeStack() { delete[] StartOfBuffer; }
360
361  // Variadic templates would make this not terrible.
362
363  /// Push a lazily-created cleanup on the stack.
364  template <class T>
365  void pushCleanup(CleanupKind Kind) {
366    void *Buffer = pushCleanup(Kind, sizeof(T));
367    Cleanup *Obj = new(Buffer) T();
368    (void) Obj;
369  }
370
371  /// Push a lazily-created cleanup on the stack.
372  template <class T, class A0>
373  void pushCleanup(CleanupKind Kind, A0 a0) {
374    void *Buffer = pushCleanup(Kind, sizeof(T));
375    Cleanup *Obj = new(Buffer) T(a0);
376    (void) Obj;
377  }
378
379  /// Push a lazily-created cleanup on the stack.
380  template <class T, class A0, class A1>
381  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
382    void *Buffer = pushCleanup(Kind, sizeof(T));
383    Cleanup *Obj = new(Buffer) T(a0, a1);
384    (void) Obj;
385  }
386
387  /// Push a lazily-created cleanup on the stack.
388  template <class T, class A0, class A1, class A2>
389  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
390    void *Buffer = pushCleanup(Kind, sizeof(T));
391    Cleanup *Obj = new(Buffer) T(a0, a1, a2);
392    (void) Obj;
393  }
394
395  /// Push a lazily-created cleanup on the stack.
396  template <class T, class A0, class A1, class A2, class A3>
397  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
398    void *Buffer = pushCleanup(Kind, sizeof(T));
399    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
400    (void) Obj;
401  }
402
403  /// Push a lazily-created cleanup on the stack.
404  template <class T, class A0, class A1, class A2, class A3, class A4>
405  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
406    void *Buffer = pushCleanup(Kind, sizeof(T));
407    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
408    (void) Obj;
409  }
410
411  // Feel free to add more variants of the following:
412
413  /// Push a cleanup with non-constant storage requirements on the
414  /// stack.  The cleanup type must provide an additional static method:
415  ///   static size_t getExtraSize(size_t);
416  /// The argument to this method will be the value N, which will also
417  /// be passed as the first argument to the constructor.
418  ///
419  /// The data stored in the extra storage must obey the same
420  /// restrictions as normal cleanup member data.
421  ///
422  /// The pointer returned from this method is valid until the cleanup
423  /// stack is modified.
424  template <class T, class A0, class A1, class A2>
425  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
426    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
427    return new (Buffer) T(N, a0, a1, a2);
428  }
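  // For illustration only (hypothetical cleanup; 'N', 'ty', 'dtor', and
  // 'useEH' are placeholders, and nothing here is declared by this header):
  // a cleanup whose trailing storage holds N addresses, following the
  // getExtraSize() protocol described above:
  //
  //     namespace {
  //       struct DestroyAddrs : EHScopeStack::Cleanup {
  //         size_t NumAddrs;
  //         QualType Ty;
  //         CodeGenFunction::Destroyer *Dtor;
  //         bool UseEHCleanupForArray;
  //
  //         DestroyAddrs(size_t numAddrs, QualType ty,
  //                      CodeGenFunction::Destroyer *dtor, bool useEH)
  //           : NumAddrs(numAddrs), Ty(ty), Dtor(dtor),
  //             UseEHCleanupForArray(useEH) {}
  //
  //         // The extra storage lives directly after the object.
  //         llvm::Value **getAddrs() {
  //           return reinterpret_cast<llvm::Value **>(this + 1);
  //         }
  //         static size_t getExtraSize(size_t N) {
  //           return N * sizeof(llvm::Value *);
  //         }
  //
  //         void Emit(CodeGenFunction &CGF, Flags flags) {
  //           for (size_t i = 0; i != NumAddrs; ++i)
  //             CGF.emitDestroy(getAddrs()[i], Ty, Dtor, UseEHCleanupForArray);
  //         }
  //       };
  //     }
  //
  //     // Pushed with N slots of extra storage, then filled in by the caller:
  //     DestroyAddrs *cleanup = EHStack.pushCleanupWithExtra<DestroyAddrs>(
  //         NormalAndEHCleanup, N, ty, dtor, useEH);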
429
430  /// Pops a cleanup scope off the stack.  This is private to CGCleanup.cpp.
431  void popCleanup();
432
433  /// Push a set of catch handlers on the stack.  The catch is
434  /// uninitialized and will need to have the given number of handlers
435  /// set on it.
436  class EHCatchScope *pushCatch(unsigned NumHandlers);
437
438  /// Pops a catch scope off the stack.  This is private to CGException.cpp.
439  void popCatch();
440
441  /// Push an exceptions filter on the stack.
442  class EHFilterScope *pushFilter(unsigned NumFilters);
443
444  /// Pops an exceptions filter off the stack.
445  void popFilter();
446
447  /// Push a terminate handler on the stack.
448  void pushTerminate();
449
450  /// Pops a terminate handler off the stack.
451  void popTerminate();
452
453  /// Determines whether the exception-scopes stack is empty.
454  bool empty() const { return StartOfData == EndOfBuffer; }
455
456  bool requiresLandingPad() const {
457    return InnermostEHScope != stable_end();
458  }
459
460  /// Determines whether there are any normal cleanups on the stack.
461  bool hasNormalCleanups() const {
462    return InnermostNormalCleanup != stable_end();
463  }
464
465  /// Returns the innermost normal cleanup on the stack, or
466  /// stable_end() if there are no normal cleanups.
467  stable_iterator getInnermostNormalCleanup() const {
468    return InnermostNormalCleanup;
469  }
470  stable_iterator getInnermostActiveNormalCleanup() const;
471
472  stable_iterator getInnermostEHScope() const {
473    return InnermostEHScope;
474  }
475
476  stable_iterator getInnermostActiveEHScope() const;
477
478  /// An unstable reference to a scope-stack depth.  Invalidated by
479  /// pushes but not pops.
480  class iterator;
481
482  /// Returns an iterator pointing to the innermost EH scope.
483  iterator begin() const;
484
485  /// Returns an iterator pointing to the outermost EH scope.
486  iterator end() const;
487
488  /// Create a stable reference to the top of the EH stack.  The
489  /// returned reference is valid until that scope is popped off the
490  /// stack.
491  stable_iterator stable_begin() const {
492    return stable_iterator(EndOfBuffer - StartOfData);
493  }
494
495  /// Create a stable reference to the bottom of the EH stack.
496  static stable_iterator stable_end() {
497    return stable_iterator(0);
498  }
499
500  /// Translates an iterator into a stable_iterator.
501  stable_iterator stabilize(iterator it) const;
502
503  /// Turn a stable reference to a scope depth into an unstable pointer
504  /// to the EH stack.
505  iterator find(stable_iterator save) const;
506
507  /// Removes the cleanup pointed to by the given stable_iterator.
508  void removeCleanup(stable_iterator save);
509
510  /// Add a branch fixup to the current cleanup scope.
511  BranchFixup &addBranchFixup() {
512    assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
513    BranchFixups.push_back(BranchFixup());
514    return BranchFixups.back();
515  }
516
517  unsigned getNumBranchFixups() const { return BranchFixups.size(); }
518  BranchFixup &getBranchFixup(unsigned I) {
519    assert(I < getNumBranchFixups());
520    return BranchFixups[I];
521  }
522
523  /// Pops lazily-removed fixups from the end of the list.  This
524  /// should only be called by procedures which have just popped a
525  /// cleanup or resolved one or more fixups.
526  void popNullFixups();
527
528  /// Clears the branch-fixups list.  This should only be called by
529  /// ResolveAllBranchFixups.
530  void clearFixups() { BranchFixups.clear(); }
531};
532
533/// CodeGenFunction - This class organizes the per-function state that is used
534/// while generating LLVM code.
535class CodeGenFunction : public CodeGenTypeCache {
536  CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
537  void operator=(const CodeGenFunction&);  // DO NOT IMPLEMENT
538
539  friend class CGCXXABI;
540public:
541  /// A jump destination is an abstract label, branching to which may
542  /// require a jump out through normal cleanups.
543  struct JumpDest {
544    JumpDest() : Block(0), ScopeDepth(), Index(0) {}
545    JumpDest(llvm::BasicBlock *Block,
546             EHScopeStack::stable_iterator Depth,
547             unsigned Index)
548      : Block(Block), ScopeDepth(Depth), Index(Index) {}
549
550    bool isValid() const { return Block != 0; }
551    llvm::BasicBlock *getBlock() const { return Block; }
552    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
553    unsigned getDestIndex() const { return Index; }
554
555  private:
556    llvm::BasicBlock *Block;
557    EHScopeStack::stable_iterator ScopeDepth;
558    unsigned Index;
559  };
560
561  CodeGenModule &CGM;  // Per-module state.
562  const TargetInfo &Target;
563
564  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
565  CGBuilderTy Builder;
566
567  /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
568  /// This excludes BlockDecls.
569  const Decl *CurFuncDecl;
570  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
571  const Decl *CurCodeDecl;
572  const CGFunctionInfo *CurFnInfo;
573  QualType FnRetTy;
574  llvm::Function *CurFn;
575
576  /// CurGD - The GlobalDecl for the current function being compiled.
577  GlobalDecl CurGD;
578
579  /// PrologueCleanupDepth - The cleanup depth enclosing all the
580  /// cleanups associated with the parameters.
581  EHScopeStack::stable_iterator PrologueCleanupDepth;
582
583  /// ReturnBlock - Unified return block.
584  JumpDest ReturnBlock;
585
586  /// ReturnValue - The temporary alloca to hold the return value. This is null
587  /// iff the function has no return value.
588  llvm::Value *ReturnValue;
589
590  /// AllocaInsertPt - This is an instruction in the entry block before which
591  /// we prefer to insert allocas.
592  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
593
594  bool CatchUndefined;
595
596  /// In ARC, whether we should autorelease the return value.
597  bool AutoreleaseResult;
598
599  const CodeGen::CGBlockInfo *BlockInfo;
600  llvm::Value *BlockPointer;
601
602  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
603  FieldDecl *LambdaThisCaptureField;
604
605  /// \brief A mapping from NRVO variables to the flags used to indicate
606  /// when the NRVO has been applied to this variable.
607  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
608
609  EHScopeStack EHStack;
610
611  /// An i32 alloca holding the index of the cleanup destination to branch to.
612  llvm::AllocaInst *NormalCleanupDest;
613
614  unsigned NextCleanupDestIndex;
615
616  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
617  CGBlockInfo *FirstBlockInfo;
618
619  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
620  llvm::BasicBlock *EHResumeBlock;
621
622  /// The exception slot.  All landing pads write the current exception pointer
623  /// into this alloca.
624  llvm::Value *ExceptionSlot;
625
626  /// The selector slot.  Under the MandatoryCleanup model, all landing pads
627  /// write the current selector value into this alloca.
628  llvm::AllocaInst *EHSelectorSlot;
629
630  /// Emits a landing pad for the current EH stack.
631  llvm::BasicBlock *EmitLandingPad();
632
633  llvm::BasicBlock *getInvokeDestImpl();
634
635  template <class T>
636  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
637    return DominatingValue<T>::save(*this, value);
638  }
639
640public:
641  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
642  /// rethrows.
643  SmallVector<llvm::Value*, 8> ObjCEHValueStack;
644
645  /// A class controlling the emission of a finally block.
646  class FinallyInfo {
647    /// Where the catchall's edge through the cleanup should go.
648    JumpDest RethrowDest;
649
650    /// A function to call to enter the catch.
651    llvm::Constant *BeginCatchFn;
652
653    /// An i1 variable indicating whether or not the @finally is
654    /// running for an exception.
655    llvm::AllocaInst *ForEHVar;
656
657    /// An i8* variable into which the exception pointer to rethrow
658    /// has been saved.
659    llvm::AllocaInst *SavedExnVar;
660
661  public:
662    void enter(CodeGenFunction &CGF, const Stmt *Finally,
663               llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
664               llvm::Constant *rethrowFn);
665    void exit(CodeGenFunction &CGF);
666  };
667
668  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
669  /// current full-expression.  Safe against the possibility that
670  /// we're currently inside a conditionally-evaluated expression.
671  template <class T, class A0>
672  void pushFullExprCleanup(CleanupKind kind, A0 a0) {
673    // If we're not in a conditional branch, or if none of the
674    // arguments requires saving, then use the unconditional cleanup.
675    if (!isInConditionalBranch())
676      return EHStack.pushCleanup<T>(kind, a0);
677
678    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
679
680    typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
681    EHStack.pushCleanup<CleanupType>(kind, a0_saved);
682    initFullExprCleanup();
683  }
684
685  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
686  /// current full-expression.  Safe against the possibility that
687  /// we're currently inside a conditionally-evaluated expression.
688  template <class T, class A0, class A1>
689  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
690    // If we're not in a conditional branch, or if none of the
691    // arguments requires saving, then use the unconditional cleanup.
692    if (!isInConditionalBranch())
693      return EHStack.pushCleanup<T>(kind, a0, a1);
694
695    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
696    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
697
698    typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
699    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
700    initFullExprCleanup();
701  }
702
703  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
704  /// current full-expression.  Safe against the possibility that
705  /// we're currently inside a conditionally-evaluated expression.
706  template <class T, class A0, class A1, class A2>
707  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
708    // If we're not in a conditional branch, or if none of the
709    // arguments requires saving, then use the unconditional cleanup.
710    if (!isInConditionalBranch()) {
711      return EHStack.pushCleanup<T>(kind, a0, a1, a2);
712    }
713
714    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
715    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
716    typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
717
718    typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
719    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
720    initFullExprCleanup();
721  }
722
723  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
724  /// current full-expression.  Safe against the possibility that
725  /// we're currently inside a conditionally-evaluated expression.
726  template <class T, class A0, class A1, class A2, class A3>
727  void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
728    // If we're not in a conditional branch, or if none of the
729    // arguments requires saving, then use the unconditional cleanup.
730    if (!isInConditionalBranch()) {
731      return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
732    }
733
734    typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
735    typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
736    typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
737    typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
738
739    typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
740    EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
741                                     a2_saved, a3_saved);
742    initFullExprCleanup();
743  }
744
745  /// Set up the last cleanup that was pushed as a conditional
746  /// full-expression cleanup.
747  void initFullExprCleanup();
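  // As an illustration of why conditional cleanups exist (source-language
  // example; not part of this header): here the temporary's destructor
  // cleanup is pushed while emitting only the right-hand side of '&&', so it
  // must be guarded by a flag recording whether that branch actually ran:
  //
  //     struct A { A(); ~A(); operator bool() const; };
  //     bool f(bool b) {
  //       return b && A();   // ~A() runs at the end of the full-expression,
  //                          // but only if A() was actually constructed
  //     }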
748
749  /// PushDestructorCleanup - Push a cleanup to call the
750  /// complete-object destructor of an object of the given type at the
751  /// given address.  Does nothing if T is not a C++ class type with a
752  /// non-trivial destructor.
753  void PushDestructorCleanup(QualType T, llvm::Value *Addr);
754
755  /// PushDestructorCleanup - Push a cleanup to call the
756  /// complete-object variant of the given destructor on the object at
757  /// the given address.
758  void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
759                             llvm::Value *Addr);
760
761  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
762  /// process all branch fixups.
763  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
764
765  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
766  /// The block cannot be reactivated.  Pops it if it's the top of the
767  /// stack.
768  ///
769  /// \param DominatingIP - An instruction which is known to
770  ///   dominate the current IP (if set) and which lies along
771  ///   all paths of execution between the current IP and the
772  ///   point at which the cleanup comes into scope.
773  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
774                              llvm::Instruction *DominatingIP);
775
776  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
777  /// Cannot be used to resurrect a deactivated cleanup.
778  ///
779  /// \param DominatingIP - An instruction which is known to
780  ///   dominate the current IP (if set) and which lies along
781  ///   all paths of execution between the current IP and the
782  ///   point at which the cleanup comes into scope.
783  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
784                            llvm::Instruction *DominatingIP);
785
786  /// \brief Enters a new scope for capturing cleanups, all of which
787  /// will be executed once the scope is exited.
788  class RunCleanupsScope {
789    EHScopeStack::stable_iterator CleanupStackDepth;
790    bool OldDidCallStackSave;
791    bool PerformCleanup;
792
793    RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
794    RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
795
796  protected:
797    CodeGenFunction& CGF;
798
799  public:
800    /// \brief Enter a new cleanup scope.
801    explicit RunCleanupsScope(CodeGenFunction &CGF)
802      : PerformCleanup(true), CGF(CGF)
803    {
804      CleanupStackDepth = CGF.EHStack.stable_begin();
805      OldDidCallStackSave = CGF.DidCallStackSave;
806      CGF.DidCallStackSave = false;
807    }
808
809    /// \brief Exit this cleanup scope, emitting any accumulated
810    /// cleanups.
811    ~RunCleanupsScope() {
812      if (PerformCleanup) {
813        CGF.DidCallStackSave = OldDidCallStackSave;
814        CGF.PopCleanupBlocks(CleanupStackDepth);
815      }
816    }
817
818    /// \brief Determine whether this scope requires any cleanups.
819    bool requiresCleanups() const {
820      return CGF.EHStack.stable_begin() != CleanupStackDepth;
821    }
822
823    /// \brief Force the emission of cleanups now, instead of waiting
824    /// until this object is destroyed.
825    void ForceCleanup() {
826      assert(PerformCleanup && "Already forced cleanup");
827      CGF.DidCallStackSave = OldDidCallStackSave;
828      CGF.PopCleanupBlocks(CleanupStackDepth);
829      PerformCleanup = false;
830    }
831  };
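  // For illustration (a sketch of typical use, not a verbatim excerpt;
  // 'S' is a hypothetical statement): wrap the emission of code that may
  // push local cleanups, so that those cleanups are emitted when the scope
  // object is destroyed:
  //
  //     {
  //       RunCleanupsScope Scope(*this);
  //       EmitStmt(S);     // any cleanups pushed here...
  //     }                  // ...are popped and emitted here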
832
833  class LexicalScope: protected RunCleanupsScope {
834    SourceRange Range;
835    bool PopDebugStack;
836
837    LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
838    LexicalScope &operator=(const LexicalScope &);
839
840  public:
841    /// \brief Enter a new cleanup scope.
842    explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
843      : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
844      if (CGDebugInfo *DI = CGF.getDebugInfo())
845        DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
846    }
847
848    /// \brief Exit this cleanup scope, emitting any accumulated
849    /// cleanups.
850    ~LexicalScope() {
851      if (PopDebugStack) {
852        CGDebugInfo *DI = CGF.getDebugInfo();
853        if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
854      }
855    }
856
857    /// \brief Force the emission of cleanups now, instead of waiting
858    /// until this object is destroyed.
859    void ForceCleanup() {
860      RunCleanupsScope::ForceCleanup();
861      if (CGDebugInfo *DI = CGF.getDebugInfo()) {
862        DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
863        PopDebugStack = false;
864      }
865    }
866  };
867
868
869  /// PopCleanupBlocks - Takes the old cleanup stack size and emits
870  /// the cleanup blocks that have been added.
871  void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
872
873  void ResolveBranchFixups(llvm::BasicBlock *Target);
874
875  /// The given basic block lies in the current EH scope, but may be a
876  /// target of a potentially scope-crossing jump; get a stable handle
877  /// to which we can perform this jump later.
878  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
879    return JumpDest(Target,
880                    EHStack.getInnermostNormalCleanup(),
881                    NextCleanupDestIndex++);
882  }
883
884  /// The given basic block lies in the current EH scope, but may be a
885  /// target of a potentially scope-crossing jump; get a stable handle
886  /// to which we can perform this jump later.
887  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
888    return getJumpDestInCurrentScope(createBasicBlock(Name));
889  }
890
891  /// EmitBranchThroughCleanup - Emit a branch from the current insert
892  /// block through the normal cleanup handling code (if any) and then
893  /// on to \arg Dest.
894  void EmitBranchThroughCleanup(JumpDest Dest);
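  // For illustration (a sketch of how statement emission typically uses
  // these, not a verbatim excerpt; 'Body' is a hypothetical statement):
  //
  //     JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
  //     EmitStmt(Body);                      // may push destructor cleanups
  //     EmitBranchThroughCleanup(LoopExit);  // e.g. when emitting a 'break'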
895
896  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
897  /// specified destination obviously has no cleanups to run.  'false' is always
898  /// a conservatively correct answer for this method.
899  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
900
901  /// popCatchScope - Pops the catch scope at the top of the EHScope
902  /// stack, emitting any required code (other than the catch handlers
903  /// themselves).
904  void popCatchScope();
905
906  llvm::BasicBlock *getEHResumeBlock();
907  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
908
909  /// An object to manage conditionally-evaluated expressions.
910  class ConditionalEvaluation {
911    llvm::BasicBlock *StartBB;
912
913  public:
914    ConditionalEvaluation(CodeGenFunction &CGF)
915      : StartBB(CGF.Builder.GetInsertBlock()) {}
916
917    void begin(CodeGenFunction &CGF) {
918      assert(CGF.OutermostConditional != this);
919      if (!CGF.OutermostConditional)
920        CGF.OutermostConditional = this;
921    }
922
923    void end(CodeGenFunction &CGF) {
924      assert(CGF.OutermostConditional != 0);
925      if (CGF.OutermostConditional == this)
926        CGF.OutermostConditional = 0;
927    }
928
929    /// Returns a block which will be executed prior to each
930    /// evaluation of the conditional code.
931    llvm::BasicBlock *getStartingBlock() const {
932      return StartBB;
933    }
934  };
935
936  /// isInConditionalBranch - Return true if we're currently emitting
937  /// one branch or the other of a conditional expression.
938  bool isInConditionalBranch() const { return OutermostConditional != 0; }
939
940  void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
941    assert(isInConditionalBranch());
942    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
943    new llvm::StoreInst(value, addr, &block->back());
944  }
945
946  /// An RAII object to record that we're evaluating a statement
947  /// expression.
948  class StmtExprEvaluation {
949    CodeGenFunction &CGF;
950
951    /// We have to save the outermost conditional: cleanups in a
952    /// statement expression aren't conditional just because the
953    /// StmtExpr is.
954    ConditionalEvaluation *SavedOutermostConditional;
955
956  public:
957    StmtExprEvaluation(CodeGenFunction &CGF)
958      : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
959      CGF.OutermostConditional = 0;
960    }
961
962    ~StmtExprEvaluation() {
963      CGF.OutermostConditional = SavedOutermostConditional;
964      CGF.EnsureInsertPoint();
965    }
966  };
967
968  /// An object which temporarily prevents a value from being
969  /// destroyed by aggressive peephole optimizations that assume that
970  /// all uses of a value have been realized in the IR.
971  class PeepholeProtection {
972    llvm::Instruction *Inst;
973    friend class CodeGenFunction;
974
975  public:
976    PeepholeProtection() : Inst(0) {}
977  };
978
979  /// A non-RAII class containing all the information about a bound
980  /// opaque value.  OpaqueValueMapping, below, is a RAII wrapper for
981  /// this which makes individual mappings very simple; using this
982  /// class directly is useful when you have a variable number of
983  /// opaque values or don't want the RAII functionality for some
984  /// reason.
985  class OpaqueValueMappingData {
986    const OpaqueValueExpr *OpaqueValue;
987    bool BoundLValue;
988    CodeGenFunction::PeepholeProtection Protection;
989
990    OpaqueValueMappingData(const OpaqueValueExpr *ov,
991                           bool boundLValue)
992      : OpaqueValue(ov), BoundLValue(boundLValue) {}
993  public:
994    OpaqueValueMappingData() : OpaqueValue(0) {}
995
996    static bool shouldBindAsLValue(const Expr *expr) {
997      // gl-values should be bound as l-values for obvious reasons.
998      // Records should be bound as l-values because IR generation
999      // always keeps them in memory.  Expressions of function type
1000      // act exactly like l-values but are formally required to be
1001      // r-values in C.
1002      return expr->isGLValue() ||
1003             expr->getType()->isRecordType() ||
1004             expr->getType()->isFunctionType();
1005    }
1006
1007    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1008                                       const OpaqueValueExpr *ov,
1009                                       const Expr *e) {
1010      if (shouldBindAsLValue(ov))
1011        return bind(CGF, ov, CGF.EmitLValue(e));
1012      return bind(CGF, ov, CGF.EmitAnyExpr(e));
1013    }
1014
1015    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1016                                       const OpaqueValueExpr *ov,
1017                                       const LValue &lv) {
1018      assert(shouldBindAsLValue(ov));
1019      CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1020      return OpaqueValueMappingData(ov, true);
1021    }
1022
1023    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1024                                       const OpaqueValueExpr *ov,
1025                                       const RValue &rv) {
1026      assert(!shouldBindAsLValue(ov));
1027      CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1028
1029      OpaqueValueMappingData data(ov, false);
1030
1031      // Work around an extremely aggressive peephole optimization in
1032      // EmitScalarConversion which assumes that all other uses of a
1033      // value are extant.
1034      data.Protection = CGF.protectFromPeepholes(rv);
1035
1036      return data;
1037    }
1038
1039    bool isValid() const { return OpaqueValue != 0; }
1040    void clear() { OpaqueValue = 0; }
1041
1042    void unbind(CodeGenFunction &CGF) {
1043      assert(OpaqueValue && "no data to unbind!");
1044
1045      if (BoundLValue) {
1046        CGF.OpaqueLValues.erase(OpaqueValue);
1047      } else {
1048        CGF.OpaqueRValues.erase(OpaqueValue);
1049        CGF.unprotectFromPeepholes(Protection);
1050      }
1051    }
1052  };
1053
1054  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1055  class OpaqueValueMapping {
1056    CodeGenFunction &CGF;
1057    OpaqueValueMappingData Data;
1058
1059  public:
1060    static bool shouldBindAsLValue(const Expr *expr) {
1061      return OpaqueValueMappingData::shouldBindAsLValue(expr);
1062    }
1063
1064    /// Build the opaque value mapping for the given conditional
1065    /// operator if it's the GNU ?: extension.  This is a common
1066  /// enough pattern that this convenience constructor is really
1067    /// helpful.
1068    ///
1069    OpaqueValueMapping(CodeGenFunction &CGF,
1070                       const AbstractConditionalOperator *op) : CGF(CGF) {
1071      if (isa<ConditionalOperator>(op))
1072        // Leave Data empty.
1073        return;
1074
1075      const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1076      Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1077                                          e->getCommon());
1078    }
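    // For illustration (source-language example; not part of this header):
    //
    //     int *pick(int *p, int *q) {
    //       return p ?: q;   // 'p' is evaluated once; the OpaqueValueExpr
    //                        // stands for that single evaluation in both
    //                        // the condition and the true branch
    //     }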
1079
1080    OpaqueValueMapping(CodeGenFunction &CGF,
1081                       const OpaqueValueExpr *opaqueValue,
1082                       LValue lvalue)
1083      : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1084    }
1085
1086    OpaqueValueMapping(CodeGenFunction &CGF,
1087                       const OpaqueValueExpr *opaqueValue,
1088                       RValue rvalue)
1089      : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1090    }
1091
1092    void pop() {
1093      Data.unbind(CGF);
1094      Data.clear();
1095    }
1096
1097    ~OpaqueValueMapping() {
1098      if (Data.isValid()) Data.unbind(CGF);
1099    }
1100  };
1101
1102  /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
1103  /// number that holds the value.
1104  unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
1105
1106  /// BuildBlockByrefAddress - Computes the address of the
1107  /// variable which is declared as __block.
1108  llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
1109                                      const VarDecl *V);
1110private:
1111  CGDebugInfo *DebugInfo;
1112  bool DisableDebugInfo;
1113
1114  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1115  /// calling llvm.stacksave for multiple VLAs in the same scope.
1116  bool DidCallStackSave;
1117
1118  /// IndirectBranch - The first time an indirect goto is seen we create a block
1119  /// with an indirect branch.  Every time we see the address of a label taken,
1120  /// we add the label to the indirect goto.  Every subsequent indirect goto is
1121  /// codegen'd as a jump to the IndirectBranch's basic block.
1122  llvm::IndirectBrInst *IndirectBranch;
1123
1124  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1125  /// decls.
1126  typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
1127  DeclMapTy LocalDeclMap;
1128
1129  /// LabelMap - This keeps track of the LLVM basic block for each C label.
1130  llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1131
1132  // BreakContinueStack - This keeps track of where break and continue
1133  // statements should jump to.
1134  struct BreakContinue {
1135    BreakContinue(JumpDest Break, JumpDest Continue)
1136      : BreakBlock(Break), ContinueBlock(Continue) {}
1137
1138    JumpDest BreakBlock;
1139    JumpDest ContinueBlock;
1140  };
1141  SmallVector<BreakContinue, 8> BreakContinueStack;
1142
1143  /// SwitchInsn - This is the nearest enclosing switch instruction. It is
1144  /// null if the current context is not in a switch.
1145  llvm::SwitchInst *SwitchInsn;
1146
1147  /// CaseRangeBlock - This block holds the condition check for the last
1148  /// case-statement range in the current switch instruction.
1149  llvm::BasicBlock *CaseRangeBlock;
1150
1151  /// OpaqueLValues - Keeps track of the current set of opaque value
1152  /// expressions.
1153  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1154  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1155
1156  // VLASizeMap - This keeps track of the associated size for each VLA type.
1157  // We track this by the size expression rather than the type itself because
1158  // in certain situations, like a const qualifier applied to a VLA typedef,
1159  // multiple VLA types can share the same size expression.
1160  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1161  // enter/leave scopes.
1162  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1163
1164  /// A block containing a single 'unreachable' instruction.  Created
1165  /// lazily by getUnreachableBlock().
1166  llvm::BasicBlock *UnreachableBlock;
1167
1168  /// CXXABIThisDecl - When generating code for a C++ member function,
1169  /// this will hold the implicit 'this' declaration.
1170  ImplicitParamDecl *CXXABIThisDecl;
1171  llvm::Value *CXXABIThisValue;
1172  llvm::Value *CXXThisValue;
1173
1174  /// CXXVTTDecl - When generating code for a base object constructor or
1175  /// base object destructor with virtual bases, this will hold the implicit
1176  /// VTT parameter.
1177  ImplicitParamDecl *CXXVTTDecl;
1178  llvm::Value *CXXVTTValue;
1179
1180  /// OutermostConditional - Points to the outermost active
1181  /// conditional control.  This is used so that we know if a
1182  /// temporary should be destroyed conditionally.
1183  ConditionalEvaluation *OutermostConditional;
1184
1185
1186  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
1187  /// type as well as the field number that contains the actual data.
1188  llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
1189                                              unsigned> > ByRefValueInfo;
1190
1191  llvm::BasicBlock *TerminateLandingPad;
1192  llvm::BasicBlock *TerminateHandler;
1193  llvm::BasicBlock *TrapBB;
1194
1195public:
1196  CodeGenFunction(CodeGenModule &cgm);
1197  ~CodeGenFunction();
1198
1199  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1200  ASTContext &getContext() const { return CGM.getContext(); }
1201  CGDebugInfo *getDebugInfo() {
1202    if (DisableDebugInfo)
1203      return NULL;
1204    return DebugInfo;
1205  }
1206  void disableDebugInfo() { DisableDebugInfo = true; }
1207  void enableDebugInfo() { DisableDebugInfo = false; }
1208
1209  bool shouldUseFusedARCCalls() {
1210    return CGM.getCodeGenOpts().OptimizationLevel == 0;
1211  }
1212
1213  const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
1214
1215  /// Returns pointers to the function's exception object slot and selector
1216  /// slot, which are assigned in every landing pad.
1217  llvm::Value *getExceptionSlot();
1218  llvm::Value *getEHSelectorSlot();
1219
1220  /// Returns the contents of the function's exception object and selector
1221  /// slots.
1222  llvm::Value *getExceptionFromSlot();
1223  llvm::Value *getSelectorFromSlot();
1224
1225  llvm::Value *getNormalCleanupDestSlot();
1226
1227  llvm::BasicBlock *getUnreachableBlock() {
1228    if (!UnreachableBlock) {
1229      UnreachableBlock = createBasicBlock("unreachable");
1230      new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1231    }
1232    return UnreachableBlock;
1233  }
1234
1235  llvm::BasicBlock *getInvokeDest() {
1236    if (!EHStack.requiresLandingPad()) return 0;
1237    return getInvokeDestImpl();
1238  }
1239
1240  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1241
1242  //===--------------------------------------------------------------------===//
1243  //                                  Cleanups
1244  //===--------------------------------------------------------------------===//
1245
1246  typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
1247
1248  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1249                                        llvm::Value *arrayEndPointer,
1250                                        QualType elementType,
1251                                        Destroyer *destroyer);
1252  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1253                                      llvm::Value *arrayEnd,
1254                                      QualType elementType,
1255                                      Destroyer *destroyer);
1256
1257  void pushDestroy(QualType::DestructionKind dtorKind,
1258                   llvm::Value *addr, QualType type);
1259  void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
1260                   Destroyer *destroyer, bool useEHCleanupForArray);
1261  void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
1262                   bool useEHCleanupForArray);
1263  llvm::Function *generateDestroyHelper(llvm::Constant *addr,
1264                                        QualType type,
1265                                        Destroyer *destroyer,
1266                                        bool useEHCleanupForArray);
1267  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1268                        QualType type, Destroyer *destroyer,
1269                        bool checkZeroLength, bool useEHCleanup);
1270
1271  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1272
1273  /// Determines whether an EH cleanup is required to destroy a type
1274  /// with the given destruction kind.
1275  bool needsEHCleanup(QualType::DestructionKind kind) {
1276    switch (kind) {
1277    case QualType::DK_none:
1278      return false;
1279    case QualType::DK_cxx_destructor:
1280    case QualType::DK_objc_weak_lifetime:
1281      return getLangOptions().Exceptions;
1282    case QualType::DK_objc_strong_lifetime:
1283      return getLangOptions().Exceptions &&
1284             CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1285    }
1286    llvm_unreachable("bad destruction kind");
1287  }
1288
1289  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1290    return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1291  }
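  // For illustration (the typical pattern, sketched; 'addr' and 'type' are
  // hypothetical): destroying a local's storage at scope exit usually looks
  // like
  //
  //     if (QualType::DestructionKind dtorKind = type.isDestructedType())
  //       pushDestroy(dtorKind, addr, type);
  //
  // where the DestructionKind overload of pushDestroy selects the cleanup
  // kind via getCleanupKind(dtorKind) and the destroyer via
  // getDestroyer(dtorKind).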
1292
1293  //===--------------------------------------------------------------------===//
1294  //                                  Objective-C
1295  //===--------------------------------------------------------------------===//
1296
1297  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1298
1299  void StartObjCMethod(const ObjCMethodDecl *MD,
1300                       const ObjCContainerDecl *CD,
1301                       SourceLocation StartLoc);
1302
1303  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1304  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1305                          const ObjCPropertyImplDecl *PID);
1306  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1307                              const ObjCPropertyImplDecl *propImpl,
1308                              llvm::Constant *AtomicHelperFn);
1309
1310  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1311                                  ObjCMethodDecl *MD, bool ctor);
1312
1313  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1314  /// for the given property.
1315  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1316                          const ObjCPropertyImplDecl *PID);
1317  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1318                              const ObjCPropertyImplDecl *propImpl,
1319                              llvm::Constant *AtomicHelperFn);
1320  bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1321  bool IvarTypeWithAggrGCObjects(QualType Ty);
1322
1323  //===--------------------------------------------------------------------===//
1324  //                                  Block Bits
1325  //===--------------------------------------------------------------------===//
1326
1327  llvm::Value *EmitBlockLiteral(const BlockExpr *);
1328  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
1329  static void destroyBlockInfos(CGBlockInfo *info);
1330  llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1331                                           const CGBlockInfo &Info,
1332                                           llvm::StructType *,
1333                                           llvm::Constant *BlockVarLayout);
1334
1335  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1336                                        const CGBlockInfo &Info,
1337                                        const Decl *OuterFuncDecl,
1338                                        const DeclMapTy &ldm);
1339
1340  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1341  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1342  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1343                                             const ObjCPropertyImplDecl *PID);
1344  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1345                                             const ObjCPropertyImplDecl *PID);
1346
1347  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1348
1349  class AutoVarEmission;
1350
1351  void emitByrefStructureInit(const AutoVarEmission &emission);
1352  void enterByrefCleanup(const AutoVarEmission &emission);
1353
1354  llvm::Value *LoadBlockStruct() {
1355    assert(BlockPointer && "no block pointer set!");
1356    return BlockPointer;
1357  }
1358
1359  void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1360  void AllocateBlockDecl(const BlockDeclRefExpr *E);
1361  llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
1362    return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
1363  }
1364  llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1365  llvm::Type *BuildByRefType(const VarDecl *var);
1366
1367  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1368                    const CGFunctionInfo &FnInfo);
1369  void StartFunction(GlobalDecl GD, QualType RetTy,
1370                     llvm::Function *Fn,
1371                     const CGFunctionInfo &FnInfo,
1372                     const FunctionArgList &Args,
1373                     SourceLocation StartLoc);
1374
1375  void EmitConstructorBody(FunctionArgList &Args);
1376  void EmitDestructorBody(FunctionArgList &Args);
1377  void EmitFunctionBody(FunctionArgList &Args);
1378
1379  void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
1380  void EmitLambdaToFunctionPointerBody(FunctionArgList &Args);
1381  llvm::Constant *EmitLambdaConvertedFnPtr(const CXXMethodDecl *MD);
1382  void EmitLambdaThunkBody(llvm::Function *Fn,
1383                           const CGFunctionInfo &FnInfo,
1384                           const CXXRecordDecl *Lambda);
1385
1386  /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1387  /// emission when possible.
1388  void EmitReturnBlock();
1389
1390  /// FinishFunction - Complete IR generation of the current function. It is
1391  /// legal to call this function even if there is no current insertion point.
1392  void FinishFunction(SourceLocation EndLoc=SourceLocation());
1393
1394  /// GenerateThunk - Generate a thunk for the given method.
1395  void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1396                     GlobalDecl GD, const ThunkInfo &Thunk);
1397
1398  void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1399                            GlobalDecl GD, const ThunkInfo &Thunk);
1400
1401  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1402                        FunctionArgList &Args);
1403
1404  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
1405                               ArrayRef<VarDecl *> ArrayIndexes);
1406
1407  /// InitializeVTablePointer - Initialize the vtable pointer of the given
1408  /// subobject.
1409  ///
1410  void InitializeVTablePointer(BaseSubobject Base,
1411                               const CXXRecordDecl *NearestVBase,
1412                               CharUnits OffsetFromNearestVBase,
1413                               llvm::Constant *VTable,
1414                               const CXXRecordDecl *VTableClass);
1415
1416  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1417  void InitializeVTablePointers(BaseSubobject Base,
1418                                const CXXRecordDecl *NearestVBase,
1419                                CharUnits OffsetFromNearestVBase,
1420                                bool BaseIsNonVirtualPrimaryBase,
1421                                llvm::Constant *VTable,
1422                                const CXXRecordDecl *VTableClass,
1423                                VisitedVirtualBasesSetTy& VBases);
1424
1425  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1426
1427  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1428  /// to by This.
1429  llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
1430
1431  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1432  /// given phase of destruction for a destructor.  The end result
1433  /// should call destructors on members and base classes in reverse
1434  /// order of their construction.
1435  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1436
1437  /// ShouldInstrumentFunction - Return true if the current function should be
1438  /// instrumented with __cyg_profile_func_* calls
1439  bool ShouldInstrumentFunction();
1440
1441  /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1442  /// instrumentation function with the current function and the call site, if
1443  /// function instrumentation is enabled.
1444  void EmitFunctionInstrumentation(const char *Fn);
1445
1446  /// EmitMCountInstrumentation - Emit call to .mcount.
1447  void EmitMCountInstrumentation();
1448
1449  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1450  /// arguments for the given function. This is also responsible for naming the
1451  /// LLVM function arguments.
1452  void EmitFunctionProlog(const CGFunctionInfo &FI,
1453                          llvm::Function *Fn,
1454                          const FunctionArgList &Args);
1455
1456  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1457  /// given temporary.
1458  void EmitFunctionEpilog(const CGFunctionInfo &FI);
1459
1460  /// EmitStartEHSpec - Emit the start of the exception spec.
1461  void EmitStartEHSpec(const Decl *D);
1462
1463  /// EmitEndEHSpec - Emit the end of the exception spec.
1464  void EmitEndEHSpec(const Decl *D);
1465
1466  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1467  llvm::BasicBlock *getTerminateLandingPad();
1468
1469  /// getTerminateHandler - Return a handler (not a landing pad, just
1470  /// a catch handler) that just calls terminate.  This is used when
1471  /// a terminate scope encloses a try.
1472  llvm::BasicBlock *getTerminateHandler();
1473
1474  llvm::Type *ConvertTypeForMem(QualType T);
1475  llvm::Type *ConvertType(QualType T);
1476  llvm::Type *ConvertType(const TypeDecl *T) {
1477    return ConvertType(getContext().getTypeDeclType(T));
1478  }
1479
1480  /// LoadObjCSelf - Load the value of self. This function is only valid while
1481  /// generating code for an Objective-C method.
1482  llvm::Value *LoadObjCSelf();
1483
1484  /// TypeOfSelfObject - Return type of object that this self represents.
1485  QualType TypeOfSelfObject();
1486
1487  /// hasAggregateLLVMType - Return true if the specified AST type will map into
1488  /// an aggregate LLVM type or is void.
1489  static bool hasAggregateLLVMType(QualType T);
1490
1491  /// createBasicBlock - Create an LLVM basic block.
1492  llvm::BasicBlock *createBasicBlock(StringRef name = "",
1493                                     llvm::Function *parent = 0,
1494                                     llvm::BasicBlock *before = 0) {
1495#ifdef NDEBUG
1496    return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1497#else
1498    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1499#endif
1500  }
1501
1502  /// getJumpDestForLabel - Return the JumpDest corresponding to the LLVM basic
1503  /// block that the specified label maps to.
1504  JumpDest getJumpDestForLabel(const LabelDecl *S);
1505
1506  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1507  /// another basic block, simplify it. This assumes that no other code could
1508  /// potentially reference the basic block.
1509  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1510
1511  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1512  /// adding a fall-through branch from the current insert block if
1513  /// necessary. It is legal to call this function even if there is no current
1514  /// insertion point.
1515  ///
1516  /// IsFinished - If true, indicates that the caller has finished emitting
1517  /// branches to the given block and does not expect to emit code into it. This
1518  /// means the block can be ignored if it is unreachable.
1519  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1520
1521  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
1522  /// near its uses, and leave the insertion point in it.
1523  void EmitBlockAfterUses(llvm::BasicBlock *BB);
1524
1525  /// EmitBranch - Emit a branch to the specified basic block from the current
1526  /// insert block, taking care to avoid creation of branches from dummy
1527  /// blocks. It is legal to call this function even if there is no current
1528  /// insertion point.
1529  ///
1530  /// This function clears the current insertion point. The caller should follow
1531  /// calls to this function with calls to Emit*Block prior to generating new
1532  /// code.
1533  void EmitBranch(llvm::BasicBlock *Block);
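  // Illustrative sketch of the usual block-emission pattern ('CondV' is a
  // hypothetical i1 value; this is not code from this file):
  //
  //   llvm::BasicBlock *ThenBB = createBasicBlock("then");
  //   llvm::BasicBlock *ContBB = createBasicBlock("cont");
  //   Builder.CreateCondBr(CondV, ThenBB, ContBB);
  //   EmitBlock(ThenBB);
  //   ... emit the conditional code ...
  //   EmitBranch(ContBB);
  //   EmitBlock(ContBB);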
1534
1535  /// HaveInsertPoint - True if an insertion point is defined. If not, this
1536  /// indicates that the current code being emitted is unreachable.
1537  bool HaveInsertPoint() const {
1538    return Builder.GetInsertBlock() != 0;
1539  }
1540
1541  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1542  /// emitted IR has a place to go. Note that by definition, if this function
1543  /// creates a block then that block is unreachable; callers may do better to
1544  /// detect when no insertion point is defined and simply skip IR generation.
1545  void EnsureInsertPoint() {
1546    if (!HaveInsertPoint())
1547      EmitBlock(createBasicBlock());
1548  }
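  // Typical guard in statement emitters (sketch): when the current code is
  // unreachable it is usually better to bail out than to force a new block:
  //
  //   if (!HaveInsertPoint())
  //     return;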
1549
1550  /// ErrorUnsupported - Print out an error that codegen doesn't support the
1551  /// specified stmt yet.
1552  void ErrorUnsupported(const Stmt *S, const char *Type,
1553                        bool OmitOnError=false);
1554
1555  //===--------------------------------------------------------------------===//
1556  //                                  Helpers
1557  //===--------------------------------------------------------------------===//
1558
1559  LValue MakeAddrLValue(llvm::Value *V, QualType T,
1560                        CharUnits Alignment = CharUnits()) {
1561    return LValue::MakeAddr(V, T, Alignment, getContext(),
1562                            CGM.getTBAAInfo(T));
1563  }
1564  LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
1565    CharUnits Alignment;
1566    if (!T->isIncompleteType())
1567      Alignment = getContext().getTypeAlignInChars(T);
1568    return LValue::MakeAddr(V, T, Alignment, getContext(),
1569                            CGM.getTBAAInfo(T));
1570  }
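  // Sketch of a common pattern (hypothetical 'Tmp' alloca, 'Ty' QualType, and
  // 'Val' llvm::Value*): wrap a raw address as an l-value so the scalar
  // load/store helpers declared below can be used:
  //
  //   LValue TmpLV = MakeAddrLValue(Tmp, Ty,
  //                                 getContext().getTypeAlignInChars(Ty));
  //   EmitStoreOfScalar(Val, TmpLV, /*isInit*/ true);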
1571
1572  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
1573  /// block. The caller is responsible for setting an appropriate alignment on
1574  /// the alloca.
1575  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
1576                                     const Twine &Name = "tmp");
1577
1578  /// InitTempAlloca - Provide an initial value for the given alloca.
1579  void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1580
1581  /// CreateIRTemp - Create a temporary IR object of the given type, with
1582  /// appropriate alignment. This routine should only be used when a temporary
1583  /// value needs to be stored into an alloca (for example, to avoid explicit
1584  /// PHI construction), but the type is the IR type, not the type appropriate
1585  /// for storing in memory.
1586  llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
1587
1588  /// CreateMemTemp - Create a temporary memory object of the given type, with
1589  /// appropriate alignment.
1590  llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
1591
1592  /// CreateAggTemp - Create a temporary memory object for the given
1593  /// aggregate type.
1594  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
1595    CharUnits Alignment = getContext().getTypeAlignInChars(T);
1596    return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
1597                                 T.getQualifiers(),
1598                                 AggValueSlot::IsNotDestructed,
1599                                 AggValueSlot::DoesNotNeedGCBarriers,
1600                                 AggValueSlot::IsNotAliased);
1601  }
1602
1603  /// Emit a cast to void* in the appropriate address space.
1604  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1605
1606  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1607  /// expression and compare the result against zero, returning an Int1Ty value.
1608  llvm::Value *EvaluateExprAsBool(const Expr *E);
1609
1610  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1611  void EmitIgnoredExpr(const Expr *E);
1612
1613  /// EmitAnyExpr - Emit code to compute the specified expression which can have
1614  /// any type.  The result is returned as an RValue struct.  If this is an
1615  /// aggregate expression, the AggSlot argument indicates where
1616  /// the result should be returned.
1617  ///
1618  /// \param IgnoreResult - True if the resulting value isn't used.
1619  RValue EmitAnyExpr(const Expr *E,
1620                     AggValueSlot AggSlot = AggValueSlot::ignored(),
1621                     bool IgnoreResult = false);
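  // Illustrative sketch (hypothetical expression 'E'): scalar and complex
  // expressions can ignore the slot, while aggregates need a destination:
  //
  //   RValue RV  = EmitAnyExpr(E);                              // scalar/complex
  //   RValue Agg = EmitAnyExpr(E, CreateAggTemp(E->getType())); // aggregate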
1622
1623  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1624  // or the value of the expression, depending on how va_list is defined.
1625  llvm::Value *EmitVAListRef(const Expr *E);
1626
1627  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
1628  /// always be accessible even if no aggregate location is provided.
1629  RValue EmitAnyExprToTemp(const Expr *E);
1630
1631  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
1632  /// arbitrary expression into the given memory location.
1633  void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1634                        Qualifiers Quals, bool IsInitializer);
1635
1636  /// EmitExprAsInit - Emits the code necessary to initialize a
1637  /// location in memory with the given initializer.
1638  void EmitExprAsInit(const Expr *init, const ValueDecl *D,
1639                      LValue lvalue, bool capturedByInit);
1640
1641  /// EmitAggregateCopy - Emit an aggregate copy.
1642  ///
1643  /// \param isVolatile - True iff either the source or the destination is
1644  /// volatile.
1645  void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1646                         QualType EltTy, bool isVolatile=false,
1647                         unsigned Alignment = 0);
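  // Sketch (hypothetical addresses 'DestPtr'/'SrcPtr' of the same record type
  // 'RecTy'):
  //
  //   EmitAggregateCopy(DestPtr, SrcPtr, RecTy, /*isVolatile=*/false);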
1648
1649  /// StartBlock - Start a new block named N. If the insert block is a dummy
1650  /// block, reuse it.
1651  void StartBlock(const char *N);
1652
1653  /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
1654  llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
1655    return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
1656  }
1657
1658  /// GetAddrOfLocalVar - Return the address of a local variable.
1659  llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1660    llvm::Value *Res = LocalDeclMap[VD];
1661    assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1662    return Res;
1663  }
1664
1665  /// getOpaqueLValueMapping - Given an opaque value expression (which
1666  /// must be mapped to an l-value), return its mapping.
1667  const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1668    assert(OpaqueValueMapping::shouldBindAsLValue(e));
1669
1670    llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1671      it = OpaqueLValues.find(e);
1672    assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1673    return it->second;
1674  }
1675
1676  /// getOpaqueRValueMapping - Given an opaque value expression (which
1677  /// must be mapped to an r-value), return its mapping.
1678  const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1679    assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1680
1681    llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1682      it = OpaqueRValues.find(e);
1683    assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1684    return it->second;
1685  }
1686
1687  /// getAccessedFieldNo - Given an encoded value and a result number, return
1688  /// the input field number being accessed.
1689  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1690
1691  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1692  llvm::BasicBlock *GetIndirectGotoBlock();
1693
1694  /// EmitNullInitialization - Generate code to set a value of the given type to
1695  /// null. If the type contains data member pointers, they will be initialized
1696  /// to -1 in accordance with the Itanium C++ ABI.
1697  void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1698
1699  // EmitVAArg - Generate code to get an argument from the passed in pointer
1700  // and update it accordingly. The return value is a pointer to the argument.
1701  // FIXME: We should be able to get rid of this method and use the va_arg
1702  // instruction in LLVM instead once it works well enough.
1703  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1704
1705  /// emitArrayLength - Compute the length of an array, even if it's a
1706  /// VLA, and drill down to the base element type.
1707  llvm::Value *emitArrayLength(const ArrayType *arrayType,
1708                               QualType &baseType,
1709                               llvm::Value *&addr);
1710
1711  /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions in
1712  /// the given variably-modified type and store them in the VLASizeMap.
1713  ///
1714  /// This function can be called with a null (unreachable) insert point.
1715  void EmitVariablyModifiedType(QualType Ty);
1716
1717  /// getVLASize - Returns an LLVM value that corresponds to the size,
1718  /// in non-variably-sized elements, of a variable length array type,
1719  /// plus the largest non-variably-sized element type.  Assumes that
1720  /// the type has already been emitted with EmitVariablyModifiedType.
1721  std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
1722  std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
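  // Usage sketch for a variably-modified type 'Ty' (hypothetical): the sizes
  // must be captured before they can be queried:
  //
  //   EmitVariablyModifiedType(Ty);
  //   std::pair<llvm::Value*, QualType> VlaSize = getVLASize(Ty);
  //   llvm::Value *NumElts = VlaSize.first;  // element count
  //   QualType EltTy = VlaSize.second;       // largest non-variably-sized element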
1723
1724  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
1725  /// generating code for a C++ member function.
1726  llvm::Value *LoadCXXThis() {
1727    assert(CXXThisValue && "no 'this' value for this function");
1728    return CXXThisValue;
1729  }
1730
1731  /// LoadCXXVTT - Load the VTT parameter passed to base constructors/destructors
1732  /// of classes with virtual bases.
1733  llvm::Value *LoadCXXVTT() {
1734    assert(CXXVTTValue && "no VTT value for this function");
1735    return CXXVTTValue;
1736  }
1737
1738  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
1739  /// complete class to the given direct base.
1740  llvm::Value *
1741  GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1742                                        const CXXRecordDecl *Derived,
1743                                        const CXXRecordDecl *Base,
1744                                        bool BaseIsVirtual);
1745
1746  /// GetAddressOfBaseClass - This function adds the necessary delta to the
1747  /// load of 'this' and returns the address of the base class.
1748  llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1749                                     const CXXRecordDecl *Derived,
1750                                     CastExpr::path_const_iterator PathBegin,
1751                                     CastExpr::path_const_iterator PathEnd,
1752                                     bool NullCheckValue);
1753
1754  llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1755                                        const CXXRecordDecl *Derived,
1756                                        CastExpr::path_const_iterator PathBegin,
1757                                        CastExpr::path_const_iterator PathEnd,
1758                                        bool NullCheckValue);
1759
1760  llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
1761                                         const CXXRecordDecl *ClassDecl,
1762                                         const CXXRecordDecl *BaseClassDecl);
1763
1764  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1765                                      CXXCtorType CtorType,
1766                                      const FunctionArgList &Args);
1767  // It's important not to confuse this and the previous function. Delegating
1768  // constructors are a C++0x feature. The constructor delegate optimization
1769  // is used to reduce duplication in the base and complete constructors where
1770  // they are substantially the same.
1771  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1772                                        const FunctionArgList &Args);
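  // Source-level illustration of the first case (hypothetical class):
  //
  //   struct S {
  //     S() : S(0) {}  // delegating constructor: handled by
  //     S(int);        // EmitDelegatingCXXConstructorCall
  //   };
  //
  // The delegate optimization, by contrast, has the complete constructor
  // simply call the base constructor via EmitDelegateCXXConstructorCall.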
1773  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1774                              bool ForVirtualBase, llvm::Value *This,
1775                              CallExpr::const_arg_iterator ArgBeg,
1776                              CallExpr::const_arg_iterator ArgEnd);
1777
1778  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1779                              llvm::Value *This, llvm::Value *Src,
1780                              CallExpr::const_arg_iterator ArgBeg,
1781                              CallExpr::const_arg_iterator ArgEnd);
1782
1783  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1784                                  const ConstantArrayType *ArrayTy,
1785                                  llvm::Value *ArrayPtr,
1786                                  CallExpr::const_arg_iterator ArgBeg,
1787                                  CallExpr::const_arg_iterator ArgEnd,
1788                                  bool ZeroInitialization = false);
1789
1790  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1791                                  llvm::Value *NumElements,
1792                                  llvm::Value *ArrayPtr,
1793                                  CallExpr::const_arg_iterator ArgBeg,
1794                                  CallExpr::const_arg_iterator ArgEnd,
1795                                  bool ZeroInitialization = false);
1796
1797  static Destroyer destroyCXXObject;
1798
1799  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1800                             bool ForVirtualBase, llvm::Value *This);
1801
1802  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
1803                               llvm::Value *NewPtr, llvm::Value *NumElements);
1804
1805  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
1806                        llvm::Value *Ptr);
1807
1808  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1809  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1810
1811  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1812                      QualType DeleteTy);
1813
1814  llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1815  llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1816
1817  void EmitCheck(llvm::Value *, unsigned Size);
1818
1819  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1820                                       bool isInc, bool isPre);
1821  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1822                                         bool isInc, bool isPre);
1823  //===--------------------------------------------------------------------===//
1824  //                            Declaration Emission
1825  //===--------------------------------------------------------------------===//
1826
1827  /// EmitDecl - Emit a declaration.
1828  ///
1829  /// This function can be called with a null (unreachable) insert point.
1830  void EmitDecl(const Decl &D);
1831
1832  /// EmitVarDecl - Emit a local variable declaration.
1833  ///
1834  /// This function can be called with a null (unreachable) insert point.
1835  void EmitVarDecl(const VarDecl &D);
1836
1837  void EmitScalarInit(const Expr *init, const ValueDecl *D,
1838                      LValue lvalue, bool capturedByInit);
1839  void EmitScalarInit(llvm::Value *init, LValue lvalue);
1840
1841  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1842                             llvm::Value *Address);
1843
1844  /// EmitAutoVarDecl - Emit an auto variable declaration.
1845  ///
1846  /// This function can be called with a null (unreachable) insert point.
1847  void EmitAutoVarDecl(const VarDecl &D);
1848
1849  class AutoVarEmission {
1850    friend class CodeGenFunction;
1851
1852    const VarDecl *Variable;
1853
1854    /// The alignment of the variable.
1855    CharUnits Alignment;
1856
1857    /// The address of the alloca.  Null if the variable was emitted
1858    /// as a global constant.
1859    llvm::Value *Address;
1860
1861    llvm::Value *NRVOFlag;
1862
1863    /// True if the variable is a __block variable.
1864    bool IsByRef;
1865
1866    /// True if the variable is of aggregate type and has a constant
1867    /// initializer.
1868    bool IsConstantAggregate;
1869
1870    struct Invalid {};
1871    AutoVarEmission(Invalid) : Variable(0) {}
1872
1873    AutoVarEmission(const VarDecl &variable)
1874      : Variable(&variable), Address(0), NRVOFlag(0),
1875        IsByRef(false), IsConstantAggregate(false) {}
1876
1877    bool wasEmittedAsGlobal() const { return Address == 0; }
1878
1879  public:
1880    static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
1881
1882    /// Returns the address of the object within this declaration.
1883    /// Note that this does not chase the forwarding pointer for
1884    /// __block decls.
1885    llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
1886      if (!IsByRef) return Address;
1887
1888      return CGF.Builder.CreateStructGEP(Address,
1889                                         CGF.getByRefValueLLVMField(Variable),
1890                                         Variable->getNameAsString());
1891    }
1892  };
1893  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
1894  void EmitAutoVarInit(const AutoVarEmission &emission);
1895  void EmitAutoVarCleanups(const AutoVarEmission &emission);
1896  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1897                              QualType::DestructionKind dtorKind);
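  // The three phases are normally run in sequence for a local variable 'D'
  // (sketch of what EmitAutoVarDecl does):
  //
  //   AutoVarEmission emission = EmitAutoVarAlloca(D);
  //   EmitAutoVarInit(emission);
  //   EmitAutoVarCleanups(emission);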
1898
1899  void EmitStaticVarDecl(const VarDecl &D,
1900                         llvm::GlobalValue::LinkageTypes Linkage);
1901
1902  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1903  void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
1904
1905  /// protectFromPeepholes - Protect a value that we're intending to
1906  /// store to the side, but which will probably be used later, from
1907  /// aggressive peepholing optimizations that might delete it.
1908  ///
1909  /// Pass the result to unprotectFromPeepholes to declare that
1910  /// protection is no longer required.
1911  ///
1912  /// There's no particular reason why this shouldn't apply to
1913  /// l-values, it's just that no existing peepholes work on pointers.
1914  PeepholeProtection protectFromPeepholes(RValue rvalue);
1915  void unprotectFromPeepholes(PeepholeProtection protection);
1916
1917  //===--------------------------------------------------------------------===//
1918  //                             Statement Emission
1919  //===--------------------------------------------------------------------===//
1920
1921  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1922  void EmitStopPoint(const Stmt *S);
1923
1924  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1925  /// this function even if there is no current insertion point.
1926  ///
1927  /// This function may clear the current insertion point; callers should use
1928  /// EnsureInsertPoint if they wish to subsequently generate code without first
1929  /// calling EmitBlock, EmitBranch, or EmitStmt.
1930  void EmitStmt(const Stmt *S);
1931
1932  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
1933  /// necessarily require an insertion point or debug information; typically
1934  /// because the statement amounts to a jump or a container of other
1935  /// statements.
1936  ///
1937  /// \return True if the statement was handled.
1938  bool EmitSimpleStmt(const Stmt *S);
1939
1940  RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
1941                          AggValueSlot AVS = AggValueSlot::ignored());
1942
1943  /// EmitLabel - Emit the block for the given label. It is legal to call this
1944  /// function even if there is no current insertion point.
1945  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
1946
1947  void EmitLabelStmt(const LabelStmt &S);
1948  void EmitGotoStmt(const GotoStmt &S);
1949  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
1950  void EmitIfStmt(const IfStmt &S);
1951  void EmitWhileStmt(const WhileStmt &S);
1952  void EmitDoStmt(const DoStmt &S);
1953  void EmitForStmt(const ForStmt &S);
1954  void EmitReturnStmt(const ReturnStmt &S);
1955  void EmitDeclStmt(const DeclStmt &S);
1956  void EmitBreakStmt(const BreakStmt &S);
1957  void EmitContinueStmt(const ContinueStmt &S);
1958  void EmitSwitchStmt(const SwitchStmt &S);
1959  void EmitDefaultStmt(const DefaultStmt &S);
1960  void EmitCaseStmt(const CaseStmt &S);
1961  void EmitCaseStmtRange(const CaseStmt &S);
1962  void EmitAsmStmt(const AsmStmt &S);
1963
1964  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
1965  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
1966  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
1967  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
1968  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
1969
1970  llvm::Constant *getUnwindResumeFn();
1971  llvm::Constant *getUnwindResumeOrRethrowFn();
1972  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1973  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1974
1975  void EmitCXXTryStmt(const CXXTryStmt &S);
1976  void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
1977
1978  //===--------------------------------------------------------------------===//
1979  //                         LValue Expression Emission
1980  //===--------------------------------------------------------------------===//
1981
1982  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
1983  RValue GetUndefRValue(QualType Ty);
1984
1985  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
1986  /// and issue an ErrorUnsupported style diagnostic (using the
1987  /// provided Name).
1988  RValue EmitUnsupportedRValue(const Expr *E,
1989                               const char *Name);
1990
1991  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
1992  /// an ErrorUnsupported style diagnostic (using the provided Name).
1993  LValue EmitUnsupportedLValue(const Expr *E,
1994                               const char *Name);
1995
1996  /// EmitLValue - Emit code to compute a designator that specifies the location
1997  /// of the expression.
1998  ///
1999  /// This can return one of two things: a simple address or a bitfield
2000  /// reference.  In either case, the LLVM Value* in the LValue structure is
2001  /// guaranteed to be an LLVM pointer type.
2002  ///
2003  /// If this returns a bitfield reference, nothing about the pointee type of
2004  /// the LLVM value is known: For example, it may not be a pointer to an
2005  /// integer.
2006  ///
2007  /// If this returns a normal address, and if the lvalue's C type is fixed
2008  /// size, this method guarantees that the returned pointer type will point to
2009  /// an LLVM type of the same size as the lvalue's type.  If the lvalue has a
2010  /// variable length type, this is not possible.
2011  ///
2012  LValue EmitLValue(const Expr *E);
2013
2014  /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
2015  /// checking code to guard against undefined behavior.  This is only
2016  /// suitable when we know that the address will be used to access the
2017  /// object.
2018  LValue EmitCheckedLValue(const Expr *E);
2019
2020  /// EmitToMemory - Change a scalar value from its value
2021  /// representation to its in-memory representation.
2022  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
2023
2024  /// EmitFromMemory - Change a scalar value from its memory
2025  /// representation to its value representation.
2026  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
2027
2028  /// EmitLoadOfScalar - Load a scalar value from an address, taking
2029  /// care to appropriately convert from the memory representation to
2030  /// the LLVM value representation.
2031  llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
2032                                unsigned Alignment, QualType Ty,
2033                                llvm::MDNode *TBAAInfo = 0);
2034
2035  /// EmitLoadOfScalar - Load a scalar value from an address, taking
2036  /// care to appropriately convert from the memory representation to
2037  /// the LLVM value representation.  The l-value must be a simple
2038  /// l-value.
2039  llvm::Value *EmitLoadOfScalar(LValue lvalue);
2040
2041  /// EmitStoreOfScalar - Store a scalar value to an address, taking
2042  /// care to appropriately convert from the LLVM value representation to
2043  /// the memory representation.
2044  void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
2045                         bool Volatile, unsigned Alignment, QualType Ty,
2046                         llvm::MDNode *TBAAInfo = 0, bool isInit=false);
2047
2048  /// EmitStoreOfScalar - Store a scalar value to an address, taking
2049  /// care to appropriately convert from the LLVM value representation to
2050  /// the memory representation.  The l-value must be a simple
2051  /// l-value.  The isInit flag indicates whether this is an initialization.
2052  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
2053  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
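  // Sketch: loading and storing a scalar through a simple l-value
  // (hypothetical 'LV'):
  //
  //   llvm::Value *V = EmitLoadOfScalar(LV);
  //   EmitStoreOfScalar(V, LV);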
2054
2055  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
2056  /// this method emits the address of the lvalue, then loads the result as an
2057  /// rvalue, returning the rvalue.
2058  RValue EmitLoadOfLValue(LValue V);
2059  RValue EmitLoadOfExtVectorElementLValue(LValue V);
2060  RValue EmitLoadOfBitfieldLValue(LValue LV);
2061
2062  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2063  /// lvalue, where both are guaranteed to have the same type (the type of the
2064  /// destination lvalue).
2065  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
2066  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
2067
2068  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same
2069  /// constraints as EmitStoreThroughLValue.
2070  ///
2071  /// \param Result [out] - If non-null, this will be set to a Value* for the
2072  /// bit-field contents after the store, appropriate for use as the result of
2073  /// an assignment to the bit-field.
2074  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2075                                      llvm::Value **Result=0);
2076
2077  /// Emit an l-value for an assignment (simple or compound) of complex type.
2078  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
2079  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
2080
2081  // Note: only available for agg return types
2082  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
2083  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
2084  // Note: only available for agg return types
2085  LValue EmitCallExprLValue(const CallExpr *E);
2086  // Note: only available for agg return types
2087  LValue EmitVAArgExprLValue(const VAArgExpr *E);
2088  LValue EmitDeclRefLValue(const DeclRefExpr *E);
2089  LValue EmitStringLiteralLValue(const StringLiteral *E);
2090  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
2091  LValue EmitPredefinedLValue(const PredefinedExpr *E);
2092  LValue EmitUnaryOpLValue(const UnaryOperator *E);
2093  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
2094  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
2095  LValue EmitMemberExpr(const MemberExpr *E);
2096  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
2097  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
2098  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
2099  LValue EmitCastLValue(const CastExpr *E);
2100  LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
2101  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
2102  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
2103
2104  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
2105                                AggValueSlot slot = AggValueSlot::ignored());
2106  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
2107
2108  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2109                              const ObjCIvarDecl *Ivar);
2110  LValue EmitLValueForAnonRecordField(llvm::Value* Base,
2111                                      const IndirectFieldDecl* Field,
2112                                      unsigned CVRQualifiers);
2113  LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
2114                            unsigned CVRQualifiers);
2115
2116  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
2117  /// if the Field is a reference, this will return the address of the reference
2118  /// and not the address of the value stored in the reference.
2119  LValue EmitLValueForFieldInitialization(llvm::Value* Base,
2120                                          const FieldDecl* Field,
2121                                          unsigned CVRQualifiers);
2122
2123  LValue EmitLValueForIvar(QualType ObjectTy,
2124                           llvm::Value* Base, const ObjCIvarDecl *Ivar,
2125                           unsigned CVRQualifiers);
2126
2127  LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
2128                                unsigned CVRQualifiers);
2129
2130  LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
2131
2132  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
2133  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
2134  LValue EmitLambdaLValue(const LambdaExpr *E);
2135  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
2136
2137  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
2138  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
2139  LValue EmitStmtExprLValue(const StmtExpr *E);
2140  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
2141  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
2142  void   EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
2143
2144  //===--------------------------------------------------------------------===//
2145  //                         Scalar Expression Emission
2146  //===--------------------------------------------------------------------===//
2147
2148  /// EmitCall - Generate a call of the given function, expecting the given
2149  /// result type, and using the given argument list which specifies both the
2150  /// LLVM arguments and the types they were derived from.
2151  ///
2152  /// \param TargetDecl - If given, the decl of the function in a direct call;
2153  /// used to set attributes on the call (noreturn, etc.).
2154  RValue EmitCall(const CGFunctionInfo &FnInfo,
2155                  llvm::Value *Callee,
2156                  ReturnValueSlot ReturnValue,
2157                  const CallArgList &Args,
2158                  const Decl *TargetDecl = 0,
2159                  llvm::Instruction **callOrInvoke = 0);
2160
2161  RValue EmitCall(QualType FnType, llvm::Value *Callee,
2162                  ReturnValueSlot ReturnValue,
2163                  CallExpr::const_arg_iterator ArgBeg,
2164                  CallExpr::const_arg_iterator ArgEnd,
2165                  const Decl *TargetDecl = 0);
2166  RValue EmitCallExpr(const CallExpr *E,
2167                      ReturnValueSlot ReturnValue = ReturnValueSlot());
2168
2169  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2170                                  ArrayRef<llvm::Value *> Args,
2171                                  const Twine &Name = "");
2172  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2173                                  const Twine &Name = "");
2174
2175  llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
2176                                llvm::Type *Ty);
2177  llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
2178                                llvm::Value *This, llvm::Type *Ty);
2179  llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
2180                                         NestedNameSpecifier *Qual,
2181                                         llvm::Type *Ty);
2182
2183  llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
2184                                                   CXXDtorType Type,
2185                                                   const CXXRecordDecl *RD);
2186
2187  RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
2188                           llvm::Value *Callee,
2189                           ReturnValueSlot ReturnValue,
2190                           llvm::Value *This,
2191                           llvm::Value *VTT,
2192                           CallExpr::const_arg_iterator ArgBeg,
2193                           CallExpr::const_arg_iterator ArgEnd);
2194  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
2195                               ReturnValueSlot ReturnValue);
2196  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
2197                                      ReturnValueSlot ReturnValue);
2198
2199  llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2200                                           const CXXMethodDecl *MD,
2201                                           llvm::Value *This);
2202  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
2203                                       const CXXMethodDecl *MD,
2204                                       ReturnValueSlot ReturnValue);
2205
2206  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
2207                                ReturnValueSlot ReturnValue);
2208
2209
2210  RValue EmitBuiltinExpr(const FunctionDecl *FD,
2211                         unsigned BuiltinID, const CallExpr *E);
2212
2213  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
2214
2215  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
2216  /// is unhandled by the current target.
2217  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2218
2219  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2220  llvm::Value *EmitNeonCall(llvm::Function *F,
2221                            SmallVectorImpl<llvm::Value*> &O,
2222                            const char *name,
2223                            unsigned shift = 0, bool rightshift = false);
2224  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
2225  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
2226                                   bool negateForRightShift);
2227
2228  llvm::Value *BuildVector(const SmallVectorImpl<llvm::Value*> &Ops);
2229  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2230  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2231  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2232
2233  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
2234  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
2235  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
2236  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
2237                             ReturnValueSlot Return = ReturnValueSlot());
2238
2239  /// Retrieves the default cleanup kind for an ARC cleanup.
2240  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
2241  CleanupKind getARCCleanupKind() {
2242    return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
2243             ? NormalAndEHCleanup : NormalCleanup;
2244  }
2245
2246  // ARC primitives.
2247  void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
2248  void EmitARCDestroyWeak(llvm::Value *addr);
2249  llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
2250  llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
2251  llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
2252                                bool ignored);
2253  void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
2254  void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
2255  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
2256  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
2257  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
2258                                  bool ignored);
2259  llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
2260                                      bool ignored);
2261  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
2262  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
2263  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
2264  void EmitARCRelease(llvm::Value *value, bool precise);
2265  llvm::Value *EmitARCAutorelease(llvm::Value *value);
2266  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
2267  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
2268  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
2269
2270  std::pair<LValue,llvm::Value*>
2271  EmitARCStoreAutoreleasing(const BinaryOperator *e);
2272  std::pair<LValue,llvm::Value*>
2273  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
2274
2275  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
2276
2277  llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
2278  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
2279  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
2280
2281  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
2282  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
2283  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
2284
2285  static Destroyer destroyARCStrongImprecise;
2286  static Destroyer destroyARCStrongPrecise;
2287  static Destroyer destroyARCWeak;
2288
2289  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
2290  llvm::Value *EmitObjCAutoreleasePoolPush();
2291  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
2292  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
2293  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
2294
2295  /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
2296  /// expression. Will emit a temporary variable if E is not an LValue.
2297  RValue EmitReferenceBindingToExpr(const Expr* E,
2298                                    const NamedDecl *InitializedDecl);
2299
2300  //===--------------------------------------------------------------------===//
2301  //                           Expression Emission
2302  //===--------------------------------------------------------------------===//
2303
2304  // Expressions are broken into three classes: scalar, complex, aggregate.
2305
2306  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
2307  /// scalar type, returning the result.
2308  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
2309
2310  /// EmitScalarConversion - Emit a conversion from the specified type to the
2311  /// specified destination type, both of which are LLVM scalar types.
2312  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
2313                                    QualType DstTy);
2314
2315  /// EmitComplexToScalarConversion - Emit a conversion from the specified
2316  /// complex type to the specified destination type, where the destination type
2317  /// is an LLVM scalar type.
2318  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
2319                                             QualType DstTy);
2320
2321
2322  /// EmitAggExpr - Emit the computation of the specified expression
2323  /// of aggregate type.  The result is computed into the given slot,
2324  /// which may be null to indicate that the value is not needed.
2325  void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
2326
2327  /// EmitAggExprToLValue - Emit the computation of the specified expression of
2328  /// aggregate type into a temporary LValue.
2329  LValue EmitAggExprToLValue(const Expr *E);
2330
2331  /// EmitGCMemmoveCollectable - Emit special API for structs with object
2332  /// pointers.
2333  void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
2334                                QualType Ty);
2335
2336  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2337  /// make sure it survives garbage collection until this point.
2338  void EmitExtendGCLifetime(llvm::Value *object);
2339
2340  /// EmitComplexExpr - Emit the computation of the specified expression of
2341  /// complex type, returning the result.
2342  ComplexPairTy EmitComplexExpr(const Expr *E,
2343                                bool IgnoreReal = false,
2344                                bool IgnoreImag = false);
2345
2346  /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
2347  /// of complex type, storing into the specified Value*.
2348  void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
2349                               bool DestIsVolatile);
2350
2351  /// StoreComplexToAddr - Store a complex number into the specified address.
2352  void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
2353                          bool DestIsVolatile);
2354  /// LoadComplexFromAddr - Load a complex number from the specified address.
2355  ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
2356
2357  /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
2358  /// a static local variable.
2359  llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
2360                                            const char *Separator,
2361                                       llvm::GlobalValue::LinkageTypes Linkage);
2362
2363  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
2364  /// global variable that has already been created for it.  If the initializer
2365  /// has a different type than GV does, this may free GV and return a different
2366  /// one.  Otherwise it just returns GV.
2367  llvm::GlobalVariable *
2368  AddInitializerToStaticVarDecl(const VarDecl &D,
2369                                llvm::GlobalVariable *GV);
2370
2371
2372  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
2373  /// variable with global storage.
2374  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
2375                                bool PerformInit);
2376
2377  /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
2378  /// with the C++ runtime so that its destructor will be called at exit.
2379  void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
2380                                     llvm::Constant *DeclPtr);
2381
2382  /// Emit code in this function to perform a guarded variable
2383  /// initialization.  Guarded initializations are used when it's not
2384  /// possible to prove that an initialization will be done exactly
2385  /// once, e.g. with a static local variable or a static data member
2386  /// of a class template.
2387  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
2388                          bool PerformInit);
2389
2390  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
2391  /// variables.
2392  void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
2393                                 llvm::Constant **Decls,
2394                                 unsigned NumDecls);
2395
2396  /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
2397  /// variables.
2398  void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
2399                                 const std::vector<std::pair<llvm::WeakVH,
2400                                   llvm::Constant*> > &DtorsAndObjects);
2401
2402  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
2403                                        const VarDecl *D,
2404                                        llvm::GlobalVariable *Addr,
2405                                        bool PerformInit);
2406
2407  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
2408
2409  void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
2410                                  const Expr *Exp);
2411
2412  void enterFullExpression(const ExprWithCleanups *E) {
2413    if (E->getNumObjects() == 0) return;
2414    enterNonTrivialFullExpression(E);
2415  }
2416  void enterNonTrivialFullExpression(const ExprWithCleanups *E);
2417
2418  void EmitCXXThrowExpr(const CXXThrowExpr *E);
2419
2420  void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
2421
2422  RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
2423
2424  //===--------------------------------------------------------------------===//
2425  //                         Annotations Emission
2426  //===--------------------------------------------------------------------===//
2427
2428  /// Emit an annotation call (intrinsic or builtin).
2429  llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
2430                                  llvm::Value *AnnotatedVal,
2431                                  llvm::StringRef AnnotationStr,
2432                                  SourceLocation Location);
2433
2434  /// Emit local annotations for the local variable V, declared by D.
2435  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
2436
2437  /// Emit field annotations for the given field & value. Returns the
2438  /// annotation result.
2439  llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
2440
2441  //===--------------------------------------------------------------------===//
2442  //                             Internal Helpers
2443  //===--------------------------------------------------------------------===//
2444
2445  /// ContainsLabel - Return true if the statement contains a label in it.  If
2446  /// this statement is not executed normally and does not contain a label, then
2447  /// we can simply remove the code.
2448  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
2449
2450  /// containsBreak - Return true if the statement contains a break out of it.
2451  /// If the statement (recursively) contains a switch or loop with a break
2452  /// inside of it, this is fine.
2453  static bool containsBreak(const Stmt *S);
2454
2455  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2456  /// to a constant, or if it does but contains a label, return false.  If it
2457  /// constant folds, return true and set the boolean result in Result.
2458  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
2459
2460  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2461  /// to a constant, or if it does but contains a label, return false.  If it
2462  /// constant folds, return true and set the folded value.
2463  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
2464
2465  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
2466  /// if statement) to the specified blocks.  Based on the condition, this might
2467  /// try to simplify the codegen of the conditional based on the branch.
2468  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
2469                            llvm::BasicBlock *FalseBlock);
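  // Sketch of how an 'if' condition is commonly lowered (hypothetical blocks
  // and condition expression 'Cond'):
  //
  //   llvm::BasicBlock *ThenBB = createBasicBlock("if.then");
  //   llvm::BasicBlock *EndBB  = createBasicBlock("if.end");
  //   EmitBranchOnBoolExpr(Cond, ThenBB, EndBB);
  //   EmitBlock(ThenBB);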
2470
2471  /// getTrapBB - Create a basic block that will call the trap intrinsic.  We'll
2472  /// generate a branch around the created basic block as necessary.
2473  llvm::BasicBlock *getTrapBB();
2474
2475  /// EmitCallArg - Emit a single call argument.
2476  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
2477
2478  /// EmitDelegateCallArg - We are performing a delegate call; that
2479  /// is, the current function is delegating to another one.  Produce
2480  /// an r-value suitable for passing the given parameter.
2481  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
2482
2483  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
2484  /// point operation, expressed as the maximum relative error in ulp.
2485  void SetFPAccuracy(llvm::Value *Val, unsigned AccuracyN,
2486                     unsigned AccuracyD = 1);
2487
2488private:
2489  void EmitReturnOfRValue(RValue RV, QualType Ty);
2490
2491  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
2492  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
2493  ///
2494  /// \param AI - The first function argument of the expansion.
2495  /// \return The argument following the last expanded function
2496  /// argument.
2497  llvm::Function::arg_iterator
2498  ExpandTypeFromArgs(QualType Ty, LValue Dst,
2499                     llvm::Function::arg_iterator AI);
2500
2501  /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
2502  /// Ty, into individual arguments on the provided vector \arg Args. See
2503  /// ABIArgInfo::Expand.
2504  void ExpandTypeToArgs(QualType Ty, RValue Src,
2505                        SmallVector<llvm::Value*, 16> &Args,
2506                        llvm::FunctionType *IRFuncTy);
2507
2508  llvm::Value* EmitAsmInput(const AsmStmt &S,
2509                            const TargetInfo::ConstraintInfo &Info,
2510                            const Expr *InputExpr, std::string &ConstraintStr);
2511
2512  llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
2513                                  const TargetInfo::ConstraintInfo &Info,
2514                                  LValue InputValue, QualType InputType,
2515                                  std::string &ConstraintStr);
2516
2517  /// EmitCallArgs - Emit call arguments for a function.
2518  /// The CallArgTypeInfo parameter is used for iterating over the known
2519  /// argument types of the function being called.
2520  template<typename T>
2521  void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
2522                    CallExpr::const_arg_iterator ArgBeg,
2523                    CallExpr::const_arg_iterator ArgEnd) {
2524    CallExpr::const_arg_iterator Arg = ArgBeg;
2525
2526    // First, use the argument types that the type info knows about.
2527    if (CallArgTypeInfo) {
2528      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
2529           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
2530        assert(Arg != ArgEnd && "Running over edge of argument list!");
2531        QualType ArgType = *I;
2532#ifndef NDEBUG
2533        QualType ActualArgType = Arg->getType();
2534        if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
2535          QualType ActualBaseType =
2536            ActualArgType->getAs<PointerType>()->getPointeeType();
2537          QualType ArgBaseType =
2538            ArgType->getAs<PointerType>()->getPointeeType();
2539          if (ArgBaseType->isVariableArrayType()) {
2540            if (const VariableArrayType *VAT =
2541                getContext().getAsVariableArrayType(ActualBaseType)) {
2542              if (!VAT->getSizeExpr())
2543                ActualArgType = ArgType;
2544            }
2545          }
2546        }
2547        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
2548               getTypePtr() ==
2549               getContext().getCanonicalType(ActualArgType).getTypePtr() &&
2550               "type mismatch in call argument!");
2551#endif
2552        EmitCallArg(Args, *Arg, ArgType);
2553      }
2554
2555      // Either we've emitted all the call args, or we have a call to a
2556      // variadic function.
2557      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
2558             "Extra arguments in non-variadic function!");
2559
2560    }
2561
2562    // If we still have any arguments, emit them using the type of the argument.
2563    for (; Arg != ArgEnd; ++Arg)
2564      EmitCallArg(Args, *Arg, Arg->getType());
2565  }
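  //
  // A hedged usage sketch (not a declaration in this header): a call
  // emitter that knows the callee's function type might write
  //
  //   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType),
  //                CE->arg_begin(), CE->arg_end());
  //
  // Passing a null CallArgTypeInfo (e.g. for an unprototyped callee) makes
  // the loop above fall back to the expressions' own types for every
  // argument.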

  const TargetCodeGenInfo &getTargetHooks() const {
    return CGM.getTargetCodeGenInfo();
  }

  void EmitDeclMetadata();

  CodeGenModule::ByrefHelpers *
  buildByrefHelpers(llvm::StructType &byrefType,
                    const AutoVarEmission &emission);

  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
};

/// Helper class with most of the code for saving a value for a
/// conditional expression cleanup.
struct DominatingLLVMValue {
  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;

  /// Answer whether the given value needs extra work to be saved.
  static bool needsSaving(llvm::Value *value) {
    // If it's not an instruction, we don't need to save.
    if (!isa<llvm::Instruction>(value)) return false;

    // If it's an instruction in the entry block, we don't need to save.
    llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
    return (block != &block->getParent()->getEntryBlock());
  }

  /// Try to save the given value.
  static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
    if (!needsSaving(value)) return saved_type(value, false);

    // Otherwise we need an alloca.
    llvm::Value *alloca =
      CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
    CGF.Builder.CreateStore(value, alloca);

    return saved_type(alloca, true);
  }

  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
    if (!value.getInt()) return value.getPointer();
    return CGF.Builder.CreateLoad(value.getPointer());
  }
};
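// A hedged sketch of the intended save/restore pattern (the names below are
// illustrative): a value computed before a conditionally-emitted cleanup is
// spilled to a temporary alloca, and the cleanup reloads it when (and if) it
// runs:
//
//   DominatingLLVMValue::saved_type saved =
//     DominatingLLVMValue::save(CGF, addr);      // may store to an alloca
//   ...
//   llvm::Value *addrAgain = DominatingLLVMValue::restore(CGF, saved);
//
// Values that already dominate every later use (non-instructions, or
// instructions in the entry block) are passed through without a spill.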

/// A partial specialization of DominatingPointer for pointer types whose
/// pointees are llvm::Values and so might be llvm::Instructions; it reuses
/// DominatingLLVMValue's save/restore machinery.
template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
  typedef T *type;
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
  }
};

/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
  typedef RValue type;
  class saved_type {
    enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
                AggregateAddress, ComplexAddress };

    llvm::Value *Value;
    Kind K;
    saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}

  public:
    static bool needsSaving(RValue value);
    static saved_type save(CodeGenFunction &CGF, RValue value);
    RValue restore(CodeGenFunction &CGF);

    // implementations in CGExprCXX.cpp
  };

  static bool needsSaving(type value) {
    return saved_type::needsSaving(value);
  }
  static saved_type save(CodeGenFunction &CGF, type value) {
    return saved_type::save(CGF, value);
  }
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return value.restore(CGF);
  }
};
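// A hedged sketch (illustrative only): cleanup-emission code that must keep
// an RValue live across a conditional branch can go through this
// specialization generically:
//
//   typedef DominatingValue<RValue> DV;
//   DV::saved_type saved = DV::save(CGF, rv);
//   ...
//   RValue rv2 = DV::restore(CGF, saved);
//
// The saved_type records which flavor of RValue (scalar, aggregate, or
// complex) was spilled so that restore can rebuild it.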

}  // end namespace CodeGen
}  // end namespace clang

#endif