CGCall.cpp revision 72564e73277e29f6db3305d1f27ba408abb7ed88
//===--- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>());
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
    return getFunctionInfo(FTP);
  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  ArgTys.push_back(MD->getSelfDecl()->getType());
  ArgTys.push_back(Context.getObjCSelType());
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                  const FunctionArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  // Look up or create a unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}

/***/

ABIInfo::~ABIInfo() {}

void ABIArgInfo::dump() const {
  fprintf(stderr, "(ABIArgInfo Kind=");
  switch (TheKind) {
  case Direct:
    fprintf(stderr, "Direct");
    break;
  case Ignore:
    fprintf(stderr, "Ignore");
    break;
  case Coerce:
    fprintf(stderr, "Coerce Type=");
    getCoerceToType()->print(llvm::errs());
    // FIXME: This is ridiculous.
    llvm::errs().flush();
    break;
  case Indirect:
    fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
    break;
  case Expand:
    fprintf(stderr, "Expand");
    break;
  }
  fprintf(stderr, ")\n");
}

/***/

/// isEmptyStruct - Return true iff a structure has no non-empty
/// members. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyStruct(QualType T) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    if (!isEmptyStruct(FD->getType()))
      return false;
  }
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const FieldDecl *isSingleElementStruct(QualType T) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const FieldDecl *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    if (isEmptyStruct(FT)) {
      // Ignore
    } else if (Found) {
      return 0;
    } else if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FD;
    } else {
      Found = isSingleElementStruct(FT);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
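
// For example, given
//   struct Inner { float f; };
//   struct Outer { struct Inner i; };
// isSingleElementStruct(Outer) walks into Inner and returns the
// FieldDecl for 'f'; a struct with two non-empty fields yields 0.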

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
                                           ASTContext &Context) {
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // If this is a bit-field we need to make sure it is still a
    // 32-bit or 64-bit type.
    if (Expr *BW = FD->getBitWidth()) {
      unsigned Width = BW->getIntegerConstantExprValue(Context).getZExtValue();
      if (Width <= 16)
        return false;
    }
  }
  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Classify "single element" structs as their element type.
    const FieldDecl *SeltFD = isSingleElementStruct(RetTy);
    if (SeltFD) {
      QualType SeltTy = SeltFD->getType()->getDesugaredType();
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        // FIXME: This is gross, it would be nice if we could just
        // pass back SeltTy and have clients deal with it. Is it worth
        // supporting coerce to both LLVM and clang Types?
        if (BT->isIntegerType()) {
          uint64_t Size = Context.getTypeSize(SeltTy);
          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
        } else if (BT->getKind() == BuiltinType::Double) {
          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as
        // the proper pointer type.
        const llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
        return ABIArgInfo::getCoerce(PtrTy);
      }
    }

    uint64_t Size = Context.getTypeSize(RetTy);
    if (Size == 8) {
      return ABIArgInfo::getCoerce(llvm::Type::Int8Ty);
    } else if (Size == 16) {
      return ABIArgInfo::getCoerce(llvm::Type::Int16Ty);
    } else if (Size == 32) {
      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
    } else if (Size == 64) {
      return ABIArgInfo::getCoerce(llvm::Type::Int64Ty);
    } else {
      return ABIArgInfo::getIndirect(0);
    }
  } else {
    return ABIArgInfo::getDirect();
  }
}
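
// For example, on x86-32:
//   struct S1 { float f; };       -> single element struct, coerced to float
//   struct S2 { int a; int b; };  -> 64 bits, coerced to i64
//   struct S3 { int a, b, c; };   -> 96 bits, returned indirectly (sret)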

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // Ignore empty structs.
    uint64_t Size = Context.getTypeSize(Ty);
    if (Ty->isStructureType() && Size == 0)
      return ABIArgInfo::getIgnore();

    // Expand structs with size <= 128-bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Context.getTypeSize(Ty) <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}
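
// For example, struct { int a; float b; } is 64 bits and all of its
// fields are 32-bit basic types, so it is expanded: the caller passes
// 'a' and 'b' as two separate scalar arguments instead of passing the
// struct indirectly.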

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  const llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
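
// For va_arg(ap, int) on x86-32 this emits code roughly like:
//   %ap.cur  = load i8** %ap
//   %0       = bitcast i8* %ap.cur to i32*
//   %ap.next = getelementptr i8* %ap.cur, i32 4  ; size rounded up to 4
//   store i8* %ap.next, i8** %ap
// and returns the typed address %0.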

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, choose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an
  /// enumeration of all the ways we might want to pass things,
  /// instead of constructing an LLVM type. This makes this code more
  /// explicit, and it makes it clearer that we are also doing this
  /// for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
                                          Class Field) const {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.
  assert((Accum == NoClass || Accum == Integer ||
          Accum == SSE || Accum == SSEUp) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  else if (Field == Memory)
    return Memory;
  else if (Accum == NoClass)
    return Field;
  else if (Accum == Integer || Field == Integer)
    return Integer;
  else if (Field == X87 || Field == X87Up || Field == ComplexX87)
    return Memory;
  else
    return SSE;
}
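
// For example, merging the fields of struct { int a; float b; } within
// a single eightbyte: merge(NoClass, Integer) == Integer (rule b), then
// merge(Integer, SSE) == Integer (rule d), so the eightbyte is passed
// in a general purpose register.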

void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value
  // class for Class pairs with appropriate constructor methods for
  // the various situations.

  // FIXME: Some of the split computations are wrong; unaligned
  // vectors shouldn't be passed in registers for example, so there is
  // no chance they can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    // FIXME: __int128 is (Integer, Integer).
  } else if (const EnumType *ET = Ty->getAsEnumType()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post-merger cleanup (see below). The only case we worry
    // about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAsRecordType()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
           e = RD->field_end(); i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bitfields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bitfields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size =
          i->getBitWidth()->getIntegerConstantExprValue(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
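
// Some example classifications:
//   int                          -> (Integer, NoClass)
//   long double                  -> (X87, X87Up)
//   _Complex double              -> (SSE, SSE)
//   struct { int a; double b; }  -> (Integer, SSE)
//   struct { float a, b; }       -> (SSE, NoClass); both floats merge
//                                   into the low eightbyte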

ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
                                          const llvm::Type *CoerceTo,
                                          ASTContext &Context) const {
  if (CoerceTo == llvm::Type::Int64Ty) {
    // Integer and pointer types will end up in a general purpose
    // register.
    if (Ty->isIntegralType() || Ty->isPointerType())
      return ABIArgInfo::getDirect();

  } else if (CoerceTo == llvm::Type::DoubleTy) {
    // FIXME: It would probably be better to make CGFunctionInfo only
    // map using canonical types than to canonicalize here.
    QualType CTy = Context.getCanonicalType(Ty);

    // Float and double end up in a single SSE reg.
    if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
      return ABIArgInfo::getDirect();

  }

  return ABIArgInfo::getCoerce(CoerceTo);
}

ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return ABIArgInfo::getIndirect(0);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::Int64Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::DoubleTy; break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::X86_FP80Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
                                    llvm::Type::X86_FP80Ty,
                                    NULL);
    break;
  }

  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
    //
    // X87UP should always be preceded by X87, so we don't need to do
    // anything here.
  case X87Up:
    assert(Lo == X87 && "Unexpected X87Up classification.");
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}
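
// For example, struct { int a; double b; } classifies as (Integer, SSE),
// so ResType becomes { i64, double } and the struct is returned in %rax
// and %xmm0; struct { float a, b; } classifies as (SSE, NoClass) and is
// coerced to a single double returned in %xmm0.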

ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return ABIArgInfo::getIndirect(0);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::Int64Ty;
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::DoubleTy;
    break;
  }

  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case X87Up:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    ++neededInt;
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;
  }

  return getCoerceResult(Ty, ResType, Context);
}
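
// For example, passing struct { int a; double b; } yields neededInt == 1,
// neededSSE == 1, and a coercion to { i64, double }: the low eightbyte
// goes in a GP register and the high eightbyte in an SSE register.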

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = ABIArgInfo::getIndirect(0);
    }
  }
}
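
// For example, a call whose first six arguments are ints exhausts the
// GP argument registers (%rdi ... %r9); a seventh argument that needs
// an integer register is then demoted to Indirect and passed on the
// stack.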

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    llvm::Type::Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                               (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the ABI document; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save area.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up
  // needing to collect arguments from different places; often what
  // should result in a simple assembling of a structure from
  // scattered addresses has many more loads than necessary. Can we
  // clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area, so we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
      const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
                                                         llvm::Type::DoubleTy,
                                                         NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}
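
// The emitted control flow has the shape:
//
//   entry:        compare gp_offset/fp_offset against the limits and
//                 branch to vaarg.in_reg or vaarg.in_mem
//   vaarg.in_reg: compute the address inside reg_save_area and bump
//                 the offsets, then branch to vaarg.end
//   vaarg.in_mem: take the argument from overflow_arg_area
//   vaarg.end:    phi of the two addresses; the caller loads from it.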

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

const ABIInfo &CodeGenTypes::getABIInfo() const {
  if (TheABIInfo)
    return *TheABIInfo;

  // For now we just cache this in the CodeGenTypes and don't bother
  // to free it.
  const char *TargetPrefix = getContext().Target.getTargetPrefix();
  if (strcmp(TargetPrefix, "x86") == 0) {
    switch (getContext().Target.getPointerWidth(0)) {
    case 32:
      return *(TheABIInfo = new X86_32ABIInfo());
    case 64:
      return *(TheABIInfo = new X86_64ABIInfo());
    }
  }

  return *(TheABIInfo = new DefaultABIInfo);
}

/***/

CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}
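
// For example, expanding struct { int a; struct { float b; } c; }
// appends i32 and float to ArgTys; nested aggregates are flattened
// recursively into their scalar leaves.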

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are
/// not present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty);

  // If load is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    assert(SrcSize < DstSize && "Coercion is losing source bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
  } else {
    assert(SrcSize > DstSize && "Coercion is missing bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}
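
// For example, on x86-64 a struct { float f; } argument is coerced to
// double: the struct is 4 bytes but double is 8, so CreateCoercedLoad
// takes the coercion-through-memory path rather than loading 8 bytes
// directly from the 4-byte object.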

/***/

bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
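
// For example, on x86-32 a C function
//   struct S { int a, b, c; } f(int x);
// returns S indirectly, so the LLVM signature becomes roughly
//   void (%struct.S*, i32)
// with the hidden return pointer prepended to the argument list (the
// sret/noalias attributes are attached by ConstructAttributeList).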
1426
1427void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
1428                                           const Decl *TargetDecl,
1429                                           AttributeListType &PAL) {
1430  unsigned FuncAttrs = 0;
1431  unsigned RetAttrs = 0;
1432
1433  if (TargetDecl) {
1434    if (TargetDecl->getAttr<NoThrowAttr>())
1435      FuncAttrs |= llvm::Attribute::NoUnwind;
1436    if (TargetDecl->getAttr<NoReturnAttr>())
1437      FuncAttrs |= llvm::Attribute::NoReturn;
1438    if (TargetDecl->getAttr<PureAttr>())
1439      FuncAttrs |= llvm::Attribute::ReadOnly;
1440    if (TargetDecl->getAttr<ConstAttr>())
1441      FuncAttrs |= llvm::Attribute::ReadNone;
1442  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Direct:
    if (RetTy->isPromotableIntegerType()) {
      if (RetTy->isSignedIntegerType()) {
        RetAttrs |= llvm::Attribute::SExt;
      } else if (RetTy->isUnsignedIntegerType()) {
        RetAttrs |= llvm::Attribute::ZExt;
      }
    }
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      Attributes |= llvm::Attribute::ByVal;
      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      break;

    case ABIArgInfo::Direct:
      if (ParamType->isPromotableIntegerType()) {
        if (ParamType->isSignedIntegerType()) {
          Attributes |= llvm::Attribute::SExt;
        } else if (ParamType->isUnsignedIntegerType()) {
          Attributes |= llvm::Attribute::ZExt;
        }
      }
      break;

    case ABIArgInfo::Ignore:
      // Skip increment; there is no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need
      // to do anything here? The result should just be reconstructed
      // on the other side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
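
// Resulting list sketch (illustrative): for 'short f(short)' on a typical
// target both types are promotable signed integers, so the list carries
// 'signext' at index 0 (the return) and 'signext' at index 1 (the argument);
// any function-wide attributes are recorded at index ~0.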

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up
  // and simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing; aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
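      // For example (illustrative): 'struct S { int a; float b; }' passed
      // Expand arrives as two LLVM arguments (i32, float) that are written
      // back into one temporary so the body can use 'Arg' normally.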
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment; there is no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to
      // drop the result in a new alloca anyway, so we could just
      // store into that directly if we broke the abstraction down
      // more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
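
// Prolog sketch (illustrative): for 'int f(int x)' with a Direct scalar
// argument, the loop above hands the raw LLVM argument to EmitParmDecl,
// which gives 'x' a stack slot; later uses of 'x' load from that slot.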

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false);
      }
      break;

    case ABIArgInfo::Direct:
      // The internal return value temp will always have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}
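
// Epilog sketch (illustrative): for an Indirect return the code above copies
// the local return temporary into the hidden sret pointer (the function's
// first LLVM argument) and falls through to 'ret void'; only Direct and
// Coerce returns leave RV non-null and produce a 'ret' with an operand.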

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and
  // simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    // Create a temporary alloca to hold the result of the call. :(
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
  }
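
  // Call-site sketch (illustrative): for 'struct S s = f();' this emits
  // roughly
  //   %tmp = alloca %struct.S
  //   call void @f(%struct.S* %tmp)
  // and the aggregate result below is simply the address of %tmp.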

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::Instruction *CI;
  // Note that the attribute masks must be combined with bitwise '|'; using
  // '||' would collapse them to the boolean 1 and test the wrong bit.
  if (!InvokeDest || Attrs.getFnAttributes() & (llvm::Attribute::NoUnwind |
                                                llvm::Attribute::NoReturn)) {
    llvm::CallInst *CallInstr =
      Builder.CreateCall(Callee, &Args[0], &Args[0]+Args.size());
    CI = CallInstr;

    CallInstr->setAttributes(Attrs);
    if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
      CallInstr->setCallingConv(F->getCallingConv());

    // If the call doesn't return, finish the basic block and clear the
    // insertion point; this allows the rest of IRgen to discard
    // unreachable code.
    if (CallInstr->doesNotReturn()) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();

      // FIXME: For now, emit a dummy basic block because expr
      // emitters in general are not ready to handle emitting
      // expressions at unreachable points.
      EnsureInsertPoint();

      // Return a reasonable RValue.
      return GetUndefRValue(RetTy);
    }
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    llvm::InvokeInst *InvokeInstr =
      Builder.CreateInvoke(Callee, Cont, InvokeDest,
                           &Args[0], &Args[0]+Args.size());
    CI = InvokeInstr;

    InvokeInstr->setAttributes(Attrs);
    if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
      InvokeInstr->setCallingConv(F->getCallingConv());

    EmitBlock(Cont);
  }
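
  // Both paths leave CI pointing at the emitted instruction; illustratively:
  //   %call = call i32 @f(i32 %a)
  // versus
  //   %call = invoke i32 @f(i32 %a)
  //             to label %invoke.cont unwind label %handler
  // where %handler is whatever the current invoke destination is.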

  if (CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    else
      return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    } else
      return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // The ABI ignored the call result, but our caller still expects an
    // RValue, so construct an appropriate undefined value.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    else
      return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
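
// EmitVAArg defers entirely to the target's ABIInfo; e.g. for 'va_arg(ap,
// int)' a typical implementation advances the va_list cursor by the
// argument's (aligned) size and loads from the old position. (Illustrative;
// the exact sequence is target-defined.)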