CGCall.cpp revision 6857d9d43b082ae825c29cca80f2f6b7c3aa4e5f
//===---- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use an iterator and sidestep silly type array creation.

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>());
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(MD->getThisType(Context));

  const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
    return getFunctionInfo(FTP);
  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  ArgTys.push_back(MD->getSelfDecl()->getType());
  ArgTys.push_back(Context.getObjCSelType());
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                  const FunctionArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}

/***/

ABIInfo::~ABIInfo() {}

void ABIArgInfo::dump() const {
  fprintf(stderr, "(ABIArgInfo Kind=");
  switch (TheKind) {
  case Direct:
    fprintf(stderr, "Direct");
    break;
  case Ignore:
    fprintf(stderr, "Ignore");
    break;
  case Coerce:
    fprintf(stderr, "Coerce Type=");
    getCoerceToType()->print(llvm::errs());
    break;
  case Indirect:
    fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
    break;
  case Expand:
    fprintf(stderr, "Expand");
    break;
  }
  fprintf(stderr, ")\n");
}

/***/

static bool isEmptyRecord(ASTContext &Context, QualType T);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();
  // Constant arrays of empty records count as empty, strip them off.
  while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
    FT = AT->getElementType();

  return isEmptyRecord(Context, FT);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T) {
  const RecordType *RT = T->getAsRecordType();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i)
    if (!isEmptyField(Context, *i))
      return false;
  return true;
}
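
// For illustration: given
//   struct A { int : 0; };        // only an unnamed bit-field
//   struct B { struct A a[4]; };  // a constant array of empty records
// both A and B count as empty records, while struct C { int x; } does not.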

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
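
// For illustration: struct S1 { double d; } and
// struct S2 { struct S1 inner[1]; } are both single element structs whose
// element type is 'double', while struct S3 { double d; int i; } is not a
// single element struct at all.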

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
                                           ASTContext &Context) {
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a bitfield
    // still counts as "basic" is more complicated than what we were doing
    // previously.
    if (FD->isBitField())
      return false;
  }

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  bool IsDarwin;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(ASTContext &Context, bool d)
    : ABIInfo(), Context(Context), IsDarwin(d) {}
};
}


/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, or complex type, it is ok.
  if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAsRecordType();
  if (!RT) return false;

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
         e = RT->getDecl()->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}
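
// For illustration: struct { short a, b; } is 32 bits with two qualifying
// fields, so it is returned in a register; struct { char c[3]; } is 24 bits,
// which is not a register size, so it is returned in memory.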

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAsVectorType()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwin) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
                                                           2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = RetTy->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // Outside of Darwin, structs and unions are always indirect.
    if (!IsDarwin && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure; padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0), Context);
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
      uint64_t Size = Context.getTypeSize(RetTy);
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}
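
// Some illustrative results of the Darwin path above:
//   struct { float f; }     -> coerced to 'float' (single element struct)
//   struct { short a, b; }  -> coerced to 'i32' (register sized struct)
//   struct { char c[12]; }  -> indirect (not register sized)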

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // Ignore empty structs.
    uint64_t Size = Context.getTypeSize(Ty);
    if (Ty->isStructureType() && Size == 0)
      return ABIArgInfo::getIgnore();

    // Expand structs with size <= 128 bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Context.getTypeSize(Ty) <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}
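
// For illustration: struct { int x; float y; } is 64 bits of 32-bit basic
// fields and is therefore expanded into two separate scalar arguments, while
// struct { char c[20]; } falls through and is passed indirectly.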

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
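
// For reference, the code above emits roughly the following IR for
// 'va_arg(ap, int)' (names abbreviated):
//   %ap = bitcast ... to i8**         ; va_list is a simple pointer here
//   %ap.cur = load i8** %ap           ; current argument address
//   %typed = bitcast i8* %ap.cur to i32*
//   %ap.next = getelementptr i8* %ap.cur, i32 4
//   store i8* %ap.next, i8** %ap      ; bump past the 4-byte stack slot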

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, choose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
  /// the ways we might want to pass things, instead of constructing an LLVM
  /// type. This makes this code more explicit, and it makes it clearer that we
  /// are also doing this for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
                                          Class Field) const {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  else if (Field == Memory)
    return Memory;
  else if (Accum == NoClass)
    return Field;
  else if (Accum == Integer || Field == Integer)
    return Integer;
  else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
           Accum == X87 || Accum == X87Up)
    return Memory;
  else
    return SSE;
}
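
// A worked example of the merge: for struct { int a; float b; } the single
// eightbyte merges NoClass with INTEGER (the int) giving INTEGER, then
// INTEGER with SSE (the float) giving INTEGER by rule (d); the struct is
// therefore passed in one general purpose register.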

void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
  } else if (const EnumType *ET = Ty->getAsEnumType()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAsRecordType()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class; this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(Context),
           e = RD->field_end(Context); i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling; they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
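
// A worked example of the classification: for struct { double d; int i; }
// the first eightbyte classifies as SSE and the second as INTEGER, so the
// struct is passed in one SSE register and one general purpose register.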

ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
                                          const llvm::Type *CoerceTo,
                                          ASTContext &Context) const {
  if (CoerceTo == llvm::Type::Int64Ty) {
    // Integer and pointer types will end up in a general purpose
    // register.
    if (Ty->isIntegralType() || Ty->isPointerType())
      return ABIArgInfo::getDirect();

  } else if (CoerceTo == llvm::Type::DoubleTy) {
    // FIXME: It would probably be better to make CGFunctionInfo only map using
    // canonical types than to canonicalize here.
    QualType CTy = Context.getCanonicalType(Ty);

    // Float and double end up in a single SSE reg.
    if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
      return ABIArgInfo::getDirect();

  }

  return ABIArgInfo::getCoerce(CoerceTo);
}

ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return ABIArgInfo::getIndirect(0);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::Int64Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::DoubleTy; break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::X86_FP80Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
                                    llvm::Type::X86_FP80Ty,
                                    NULL);
    break;
  }

  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}
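
// For illustration: _Complex double classifies as (SSE, SSE) and is
// returned coerced to { double, double } (i.e. in %xmm0 and %xmm1), while
// long double classifies as (X87, X87Up) and is returned as x86_fp80 in
// %st0.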

ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return ABIArgInfo::getIndirect(0);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::Int64Ty;
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::DoubleTy;
    break;
  }

  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    ++neededInt;
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;
  }

  return getCoerceResult(Ty, ResType, Context);
}
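
// For illustration: struct { double d; int i; } classifies as
// (SSE, INTEGER), yielding neededSSE == 1, neededInt == 1 and a coercion
// to { double, i64 }.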

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = ABIArgInfo::getIndirect(0);
    }
  }
}
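
// Note the all-or-nothing rule above: with only one free SSE register
// left, an argument needing two of them (e.g. a _Complex double, classified
// (SSE, SSE)) does not get a partial assignment; it is demoted to indirect
// and the remaining free registers are left for later arguments.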

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    llvm::Type::Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                               (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
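
// In C terms, the steps above amount to roughly:
//   char *p = l->overflow_arg_area;
//   if (alignof(type) > 8) p = (char *) (((uintptr_t) p + 15) & ~15);
//   l->overflow_arg_area = p + ((sizeof(type) + 7) & ~7);
//   return (type *) p;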

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo; there are only (6 * 8 + 8 * 16) = 176 bytes
  // of register save area.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area; we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
      const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
                                                         llvm::Type::DoubleTy,
                                                         NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}

// ABI Info for PIC16
class PIC16ABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

};

ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context) const {
  return ABIArgInfo::getDirect();
}

llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

class ARMABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    it->info = classifyArgumentType(it->type, Context);
  }
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            ASTContext &Context) const {
  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getDirect();
  }
  // FIXME: This is kind of nasty... but there isn't much choice because the ARM
  // backend doesn't support byval.
  // FIXME: This doesn't handle alignment > 64 bits.
  const llvm::Type* ElemTy;
  unsigned SizeRegs;
  if (Context.getTypeAlign(Ty) > 32) {
    ElemTy = llvm::Type::Int64Ty;
    SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
  } else {
    ElemTy = llvm::Type::Int32Ty;
    SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
  }
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
  const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
  return ABIArgInfo::getCoerce(STy);
}
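
// For illustration: a 12 byte struct with 32-bit alignment is coerced to
// the packed struct { [3 x i32] }, so it occupies exactly three argument
// registers or stack slots.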
1378
1379ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
1380                                          ASTContext &Context) const {
1381  if (RetTy->isVoidType()) {
1382    return ABIArgInfo::getIgnore();
1383  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1384    // Aggregates <= 4 bytes are returned in r0; other aggregates
1385    // are returned indirectly.
1386    uint64_t Size = Context.getTypeSize(RetTy);
1387    if (Size <= 32)
1388      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
1389    return ABIArgInfo::getIndirect(0);
1390  } else {
1391    return ABIArgInfo::getDirect();
1392  }
1393}

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  const llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Arguments occupy 4-byte stack slots; round the type size up accordingly.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
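
// The IR emitted for va_arg of an int looks roughly like (a sketch):
//   %ap.cur = load i8** %ap
//   %0 = bitcast i8* %ap.cur to i32*
//   %ap.next = getelementptr i8* %ap.cur, i32 4
//   store i8* %ap.next, i8** %ap
// with %0 returned as the address of the current argument.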

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

const ABIInfo &CodeGenTypes::getABIInfo() const {
  if (TheABIInfo)
    return *TheABIInfo;

  // For now we just cache this in the CodeGenTypes and don't bother
  // to free it.
  const char *TargetPrefix = getContext().Target.getTargetPrefix();
  if (strcmp(TargetPrefix, "x86") == 0) {
    bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin");
    switch (getContext().Target.getPointerWidth(0)) {
    case 32:
      return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin));
    case 64:
      return *(TheABIInfo = new X86_64ABIInfo());
    }
  } else if (strcmp(TargetPrefix, "arm") == 0) {
    // FIXME: Support for OABI?
    return *(TheABIInfo = new ARMABIInfo());
  } else if (strcmp(TargetPrefix, "pic16") == 0) {
    return *(TheABIInfo = new PIC16ABIInfo());
  }

  return *(TheABIInfo = new DefaultABIInfo);
}
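
// For example (a sketch): a triple like i386-apple-darwin9 selects
// X86_32ABIInfo with IsDarwin set, x86_64-pc-linux-gnu selects X86_64ABIInfo,
// and any target without a specialized ABIInfo falls back to DefaultABIInfo.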

/***/

CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}
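
// For example (a sketch): for "int f(float, char*)" the Args array holds the
// return type in slot 0 followed by the parameter types:
//   Args[0].type = int, Args[1].type = float, Args[2].type = char*
// so the argument iterators simply walk Args[1..NumArgs].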

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}
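
// For example (a sketch, with a hypothetical record): expanding
//   struct P { int x; struct { float y, z; } inner; };
// recurses into the nested aggregate and appends i32, float, float to
// ArgTys, flattening the record into one LLVM parameter per scalar field.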

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}
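
// For example (a sketch, continuing struct P above): at a call site
// ExpandTypeToArgs pushes the loaded scalars x, inner.y, inner.z onto Args
// in field order; in the callee prolog ExpandTypeFromArgs performs the
// inverse, storing three consecutive LLVM arguments back into a temporary
// allocated for P.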

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  // If the load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}
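
// For example (a sketch): coercing a 12-byte struct to { [3 x i32] } takes
// the fast path, bitcasting the struct's address and loading directly;
// coercing a 4-byte struct up to i64 takes the memory path through an i64
// temporary, leaving the upper 32 bits of the result undefined.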

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If the store is legal, just bitcast the dst pointer. Storing through a
  // pointer cast to a larger src type would write past the end of the
  // destination, so that case goes through memory below.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    //
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
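
// For example (a sketch): for "struct Big f(int)" whose return is classified
// Indirect, the computed type is roughly "void (%struct.Big*, i32)"; the
// sret pointer is prepended to the argument list and the IR result type
// becomes void.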

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Direct:
    if (RetTy->isPromotableIntegerType()) {
      if (RetTy->isSignedIntegerType()) {
        RetAttrs |= llvm::Attribute::SExt;
      } else if (RetTy->isUnsignedIntegerType()) {
        RetAttrs |= llvm::Attribute::ZExt;
      }
    }
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly.
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also.
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>())
      RegParm = RegParmAttr->getNumParams();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      Attributes |= llvm::Attribute::ByVal;
      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Direct:
      if (ParamType->isPromotableIntegerType()) {
        if (ParamType->isSignedIntegerType()) {
          Attributes |= llvm::Attribute::SExt;
        } else if (ParamType->isUnsignedIntegerType()) {
          Attributes |= llvm::Attribute::ZExt;
        }
      }
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
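
// For example (a sketch): "struct Big g(signed char c)", assuming the decl
// carries NoThrowAttr, yields roughly
//   { 1 -> sret noalias, 2 -> signext, ~0 -> nounwind }
// where indices are 1-based parameter positions, index 0 would hold return
// value attributes, and ~0 holds function-level attributes.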

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, 0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
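
// For example (a sketch): for "void f(struct P p)" where p is Expanded into
// three scalars, the prolog allocates "p.addr", names the incoming LLVM
// arguments "p.0", "p.1", "p.2", and reassembles them into p.addr so the
// function body can keep accessing p by reference.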

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Direct:
      // The internal return value temp will always have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}
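
// For example (a sketch): a return type Coerced to i64 is loaded back out of
// the return-value slot with CreateCoercedLoad and emitted as "ret i64 ...";
// an Indirect return copies into the sret argument and emits "ret void".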

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, ArgType);

  return EmitAnyExprToTemp(E);
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    // Create a temporary alloca to hold the result of the call. :(
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  if (const llvm::Function *F =
        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring the call's result, make sure to construct the
    // appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
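
// For example (a sketch): calling "struct Big g(void)" with an Indirect
// return emits roughly
//   %tmp = alloca %struct.Big
//   call void @g(%struct.Big* sret %tmp)
// and the result is handed back as an aggregate RValue pointing at %tmp.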

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
