CGCall.cpp revision 8b29a387788bbb7a7c3b64c37473bc46299d2132
1//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliance.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "CodeGenFunction.h"
17#include "CodeGenModule.h"
18#include "clang/Basic/TargetInfo.h"
19#include "clang/AST/ASTContext.h"
20#include "clang/AST/Decl.h"
21#include "clang/AST/DeclObjC.h"
22#include "clang/AST/RecordLayout.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/Attributes.h"
25#include "llvm/Support/CommandLine.h"
26#include "llvm/Target/TargetData.h"
27
28#include "ABIInfo.h"
29
30using namespace clang;
31using namespace CodeGen;
32
33/***/
34
35// FIXME: Use iterator and sidestep silly type array creation.
36
37const
38CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionTypeNoProto *FTNP) {
39  return getFunctionInfo(FTNP->getResultType(),
40                         llvm::SmallVector<QualType, 16>());
41}
42
43const
44CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionTypeProto *FTP) {
45  llvm::SmallVector<QualType, 16> ArgTys;
46  // FIXME: Kill copy.
47  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
48    ArgTys.push_back(FTP->getArgType(i));
49  return getFunctionInfo(FTP->getResultType(), ArgTys);
50}
51
52const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
53  const FunctionType *FTy = FD->getType()->getAsFunctionType();
54  if (const FunctionTypeProto *FTP = dyn_cast<FunctionTypeProto>(FTy))
55    return getFunctionInfo(FTP);
56  return getFunctionInfo(cast<FunctionTypeNoProto>(FTy));
57}
58
59const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
60  llvm::SmallVector<QualType, 16> ArgTys;
61  ArgTys.push_back(MD->getSelfDecl()->getType());
62  ArgTys.push_back(Context.getObjCSelType());
63  // FIXME: Kill copy?
64  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
65         e = MD->param_end(); i != e; ++i)
66    ArgTys.push_back((*i)->getType());
67  return getFunctionInfo(MD->getResultType(), ArgTys);
68}
69
70const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
71                                                    const CallArgList &Args) {
72  // FIXME: Kill copy.
73  llvm::SmallVector<QualType, 16> ArgTys;
74  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
75       i != e; ++i)
76    ArgTys.push_back(i->second);
77  return getFunctionInfo(ResTy, ArgTys);
78}
79
80const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
81                                                  const FunctionArgList &Args) {
82  // FIXME: Kill copy.
83  llvm::SmallVector<QualType, 16> ArgTys;
84  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
85       i != e; ++i)
86    ArgTys.push_back(i->second);
87  return getFunctionInfo(ResTy, ArgTys);
88}
89
90const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
91                               const llvm::SmallVector<QualType, 16> &ArgTys) {
92  // Lookup or create unique function info.
93  llvm::FoldingSetNodeID ID;
94  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());
95
96  void *InsertPos = 0;
97  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
98  if (FI)
99    return *FI;
100
101  // Construct the function info.
102  FI = new CGFunctionInfo(ResTy, ArgTys);
103  FunctionInfos.InsertNode(FI, InsertPos);
104
105  // Compute ABI information.
106  getABIInfo().computeInfo(*FI, getContext());
107
108  return *FI;
109}
110
111/***/
112
113ABIInfo::~ABIInfo() {}
114
115/// isEmptyStruct - Return true iff a structure has no non-empty
116/// members. Note that a structure with a flexible array member is not
117/// considered empty.
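///
/// For illustration (a sketch of this predicate, assuming C++/GNU C empty
/// structs): given
///   struct Empty {};
///   struct AlsoEmpty { struct Empty e; };
///   struct NotEmpty { int x; };
/// Empty and AlsoEmpty are considered empty; NotEmpty is not.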
118static bool isEmptyStruct(QualType T) {
119  const RecordType *RT = T->getAsStructureType();
120  if (!RT)
121    return false;
122  const RecordDecl *RD = RT->getDecl();
123  if (RD->hasFlexibleArrayMember())
124    return false;
125  for (RecordDecl::field_iterator i = RD->field_begin(),
126         e = RD->field_end(); i != e; ++i) {
127    const FieldDecl *FD = *i;
128    if (!isEmptyStruct(FD->getType()))
129      return false;
130  }
131  return true;
132}
133
134/// isSingleElementStruct - Determine if a structure is a "single
135/// element struct", i.e. it has exactly one non-empty field or
136/// exactly one field which is itself a single element
137/// struct. Structures with flexible array members are never
138/// considered single element structs.
139///
140/// \return The field declaration for the single non-empty field, if
141/// it exists.
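///
/// For illustration (a sketch of the cases handled below):
///   struct Empty {};
///   struct S { struct Empty e; float f; };  // single element struct: returns 'f'
///   struct T { int a; int b; };             // two non-empty fields: returns 0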
142static const FieldDecl *isSingleElementStruct(QualType T) {
143  const RecordType *RT = T->getAsStructureType();
144  if (!RT)
145    return 0;
146
147  const RecordDecl *RD = RT->getDecl();
148  if (RD->hasFlexibleArrayMember())
149    return 0;
150
151  const FieldDecl *Found = 0;
152  for (RecordDecl::field_iterator i = RD->field_begin(),
153         e = RD->field_end(); i != e; ++i) {
154    const FieldDecl *FD = *i;
155    QualType FT = FD->getType();
156
157    if (isEmptyStruct(FT)) {
158      // Ignore
159    } else if (Found) {
160      return 0;
161    } else if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
162      Found = FD;
163    } else {
164      Found = isSingleElementStruct(FT);
165      if (!Found)
166        return 0;
167    }
168  }
169
170  return Found;
171}
172
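// For illustration (a sketch, not normative): with the two helpers below,
//   struct A { int i; double d; void *p; };  // all fields are 32- or 64-bit basic types
//   struct B { short s; };                   // 'short' is 16 bits, so this fails
// areAllFields32Or64BitBasicType returns true for A and false for B.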
173static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
174  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
175    return false;
176
177  uint64_t Size = Context.getTypeSize(Ty);
178  return Size == 32 || Size == 64;
179}
180
181static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
182                                           ASTContext &Context) {
183  for (RecordDecl::field_iterator i = RD->field_begin(),
184         e = RD->field_end(); i != e; ++i) {
185    const FieldDecl *FD = *i;
186
187    if (!is32Or64BitBasicType(FD->getType(), Context))
188      return false;
189
190    // If this is a bit-field we need to make sure it is still a
191    // 32-bit or 64-bit type.
192    if (Expr *BW = FD->getBitWidth()) {
193      unsigned Width = BW->getIntegerConstantExprValue(Context).getZExtValue();
194      if (Width <= 16)
195        return false;
196    }
197  }
198  return true;
199}
200
201namespace {
202/// DefaultABIInfo - The default implementation for ABI specific
203/// details. This implementation provides information which results in
204/// self-consistent and sensible LLVM IR generation, but does not
205/// conform to any particular ABI.
206class DefaultABIInfo : public ABIInfo {
207  ABIArgInfo classifyReturnType(QualType RetTy,
208                                ASTContext &Context) const;
209
210  ABIArgInfo classifyArgumentType(QualType RetTy,
211                                  ASTContext &Context) const;
212
213  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
214    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
215    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
216         it != ie; ++it)
217      it->info = classifyArgumentType(it->type, Context);
218  }
219};
220
221/// X86_32ABIInfo - The X86-32 ABI information.
222class X86_32ABIInfo : public ABIInfo {
223public:
224  ABIArgInfo classifyReturnType(QualType RetTy,
225                                ASTContext &Context) const;
226
227  ABIArgInfo classifyArgumentType(QualType RetTy,
228                                  ASTContext &Context) const;
229
230  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
231    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
232    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
233         it != ie; ++it)
234      it->info = classifyArgumentType(it->type, Context);
235  }
236};
237}
238
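// For illustration (a sketch of the return classification below, not normative):
//   struct S1 { float f; };         -> coerced to float (single element struct)
//   struct S2 { int a; int b; };    -> coerced to i64 (64-bit aggregate)
//   struct S3 { int a, b, c; };     -> returned indirectly via sret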
239ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
240                                            ASTContext &Context) const {
241  if (RetTy->isVoidType()) {
242    return ABIArgInfo::getIgnore();
243  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
244    // Classify "single element" structs as their element type.
245    const FieldDecl *SeltFD = isSingleElementStruct(RetTy);
246    if (SeltFD) {
247      QualType SeltTy = SeltFD->getType()->getDesugaredType();
248      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
249        // FIXME: This is gross, it would be nice if we could just
250        // pass back SeltTy and have clients deal with it. Is it worth
251        // supporting coerce to both LLVM and clang Types?
252        if (BT->isIntegerType()) {
253          uint64_t Size = Context.getTypeSize(SeltTy);
254          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
255        } else if (BT->getKind() == BuiltinType::Float) {
256          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
257        } else if (BT->getKind() == BuiltinType::Double) {
258          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
259        }
260      } else if (SeltTy->isPointerType()) {
261        // FIXME: It would be really nice if this could come out as
262        // the proper pointer type.
263        llvm::Type *PtrTy =
264          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
265        return ABIArgInfo::getCoerce(PtrTy);
266      }
267    }
268
269    uint64_t Size = Context.getTypeSize(RetTy);
270    if (Size == 8) {
271      return ABIArgInfo::getCoerce(llvm::Type::Int8Ty);
272    } else if (Size == 16) {
273      return ABIArgInfo::getCoerce(llvm::Type::Int16Ty);
274    } else if (Size == 32) {
275      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
276    } else if (Size == 64) {
277      return ABIArgInfo::getCoerce(llvm::Type::Int64Ty);
278    } else {
279      return ABIArgInfo::getStructRet();
280    }
281  } else {
282    return ABIArgInfo::getDirect();
283  }
284}
285
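// For illustration (a sketch of the argument classification below):
//   struct P { int a; double d; };  -> expanded into separate i32 and double arguments
//   struct Q { char c[5]; };        -> passed byval on the stack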
286ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
287                                              ASTContext &Context) const {
288  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
289    // Structures with flexible arrays are always byval.
290    if (const RecordType *RT = Ty->getAsStructureType())
291      if (RT->getDecl()->hasFlexibleArrayMember())
292        return ABIArgInfo::getByVal(0);
293
294    // Expand empty structs (i.e. ignore)
295    uint64_t Size = Context.getTypeSize(Ty);
296    if (Ty->isStructureType() && Size == 0)
297      return ABIArgInfo::getExpand();
298
299    // Expand structs with size <= 128-bits which consist only of
300    // basic types (int, long long, float, double, xxx*). This is
301    // non-recursive and does not ignore empty fields.
302    if (const RecordType *RT = Ty->getAsStructureType()) {
303      if (Context.getTypeSize(Ty) <= 4*32 &&
304          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
305        return ABIArgInfo::getExpand();
306    }
307
308    return ABIArgInfo::getByVal(0);
309  } else {
310    return ABIArgInfo::getDirect();
311  }
312}
313
314namespace {
315/// X86_64ABIInfo - The X86_64 ABI information.
316class X86_64ABIInfo : public ABIInfo {
317  enum Class {
318    Integer = 0,
319    SSE,
320    SSEUp,
321    X87,
322    X87Up,
323    ComplexX87,
324    NoClass,
325    Memory
326  };
327
328  /// merge - Implement the X86_64 ABI merging algorithm.
329  ///
330  /// Merge an accumulating classification \arg Accum with a field
331  /// classification \arg Field.
332  ///
333  /// \param Accum - The accumulating classification. This should
334  /// always be either NoClass or the result of a previous merge
335  /// call. In addition, this should never be Memory (the caller
336  /// should just return Memory for the aggregate).
337  Class merge(Class Accum, Class Field) const;
338
339  /// classify - Determine the x86_64 register classes in which the
340  /// given type T should be passed.
341  ///
342  /// \param Lo - The classification for the parts of the type
343  /// residing in the low word of the containing object.
344  ///
345  /// \param Hi - The classification for the parts of the type
346  /// residing in the high word of the containing object.
347  ///
348  /// \param OffsetBase - The bit offset of this type in the
349  /// containing object.  Some parameters are classified differently
350  /// depending on whether they straddle an eightbyte boundary.
351  ///
352  /// If a word is unused its result will be NoClass; if a type should
353  /// be passed in Memory then at least the classification of \arg Lo
354  /// will be Memory.
355  ///
356  /// The \arg Lo class will be NoClass iff the argument is ignored.
357  ///
358  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
359  /// be NoClass.
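  ///
  /// For illustration (a sketch, not normative):
  ///   struct { int a; int b; double d; }  -> Lo = Integer, Hi = SSE
  ///   _Complex double                     -> Lo = SSE,     Hi = SSE
  ///   long double                         -> Lo = X87,     Hi = X87Up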
360  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
361                Class &Lo, Class &Hi) const;
362
363  ABIArgInfo classifyReturnType(QualType RetTy,
364                                ASTContext &Context) const;
365
366  ABIArgInfo classifyArgumentType(QualType Ty,
367                                  ASTContext &Context,
368                                  unsigned &freeIntRegs,
369                                  unsigned &freeSSERegs) const;
370
371public:
372  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
373};
374}
375
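// For illustration (a sketch of the merge rules implemented below):
//   merge(Integer, SSE) == Integer   (rule d)
//   merge(NoClass, SSE) == SSE       (rule b)
//   merge(SSE, X87)     == Memory    (rule e)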
376X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
377                                          Class Field) const {
378  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
379  // classified recursively so that always two fields are
380  // considered. The resulting class is calculated according to
381  // the classes of the fields in the eightbyte:
382  //
383  // (a) If both classes are equal, this is the resulting class.
384  //
385  // (b) If one of the classes is NO_CLASS, the resulting class is
386  // the other class.
387  //
388  // (c) If one of the classes is MEMORY, the result is the MEMORY
389  // class.
390  //
391  // (d) If one of the classes is INTEGER, the result is the
392  // INTEGER.
393  //
394  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
395  // MEMORY is used as class.
396  //
397  // (f) Otherwise class SSE is used.
398  assert((Accum == NoClass || Accum == Integer ||
399          Accum == SSE || Accum == SSEUp) &&
400         "Invalid accumulated classification during merge.");
401  if (Accum == Field || Field == NoClass)
402    return Accum;
403  else if (Field == Memory)
404    return Memory;
405  else if (Accum == NoClass)
406    return Field;
407  else if (Accum == Integer || Field == Integer)
408    return Integer;
409  else if (Field == X87 || Field == X87Up || Field == ComplexX87)
410    return Memory;
411  else
412    return SSE;
413}
414
415void X86_64ABIInfo::classify(QualType Ty,
416                             ASTContext &Context,
417                             uint64_t OffsetBase,
418                             Class &Lo, Class &Hi) const {
419  // FIXME: This code can be simplified by introducing a simple value
420  // class for Class pairs with appropriate constructor methods for
421  // the various situations.
422
423  Lo = Hi = NoClass;
424
425  Class &Current = OffsetBase < 64 ? Lo : Hi;
426  Current = Memory;
427
428  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
429    BuiltinType::Kind k = BT->getKind();
430
431    if (k == BuiltinType::Void) {
432      Current = NoClass;
433    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
434      Current = Integer;
435    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
436      Current = SSE;
437    } else if (k == BuiltinType::LongDouble) {
438      Lo = X87;
439      Hi = X87Up;
440    }
441    // FIXME: _Decimal32 and _Decimal64 are SSE.
442    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
443    // FIXME: __int128 is (Integer, Integer).
444  } else if (Ty->isPointerLikeType() || Ty->isBlockPointerType() ||
445             Ty->isObjCQualifiedInterfaceType()) {
446    Current = Integer;
447  } else if (const VectorType *VT = Ty->getAsVectorType()) {
448    uint64_t Size = Context.getTypeSize(VT);
449    if (Size == 64) {
450      // gcc passes <1 x double> in memory.
451      if (VT->getElementType() == Context.DoubleTy)
452        return;
453
454      Current = SSE;
455
456      // If this type crosses an eightbyte boundary, it should be
457      // split.
458      if (OffsetBase && OffsetBase != 64)
459        Hi = Lo;
460    } else if (Size == 128) {
461      Lo = SSE;
462      Hi = SSEUp;
463    }
464  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
465    QualType ET = CT->getElementType();
466
467    uint64_t Size = Context.getTypeSize(Ty);
468    if (ET->isIntegerType()) {
469      if (Size <= 64)
470        Current = Integer;
471      else if (Size <= 128)
472        Lo = Hi = Integer;
473    } else if (ET == Context.FloatTy)
474      Current = SSE;
475    else if (ET == Context.DoubleTy)
476      Lo = Hi = SSE;
477    else if (ET == Context.LongDoubleTy)
478      Current = ComplexX87;
479
480    // If this complex type crosses an eightbyte boundary then it
481    // should be split.
482    uint64_t EB_Real = (OffsetBase) / 64;
483    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
484    if (Hi == NoClass && EB_Real != EB_Imag)
485      Hi = Lo;
486  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
487    // Arrays are treated like structures.
488
489    uint64_t Size = Context.getTypeSize(Ty);
490
491    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
492    // than two eightbytes, ..., it has class MEMORY.
493    if (Size > 128)
494      return;
495
496    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
497    // fields, it has class MEMORY.
498    //
499    // Only need to check alignment of array base.
500    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
501      return;
502
503    // Otherwise implement simplified merge. We could be smarter about
504    // this, but it isn't worth it and would be harder to verify.
505    Current = NoClass;
506    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
507    uint64_t ArraySize = AT->getSize().getZExtValue();
508    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
509      Class FieldLo, FieldHi;
510      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
511      Lo = merge(Lo, FieldLo);
512      Hi = merge(Hi, FieldHi);
513      if (Lo == Memory || Hi == Memory)
514        break;
515    }
516
517    // Do post merger cleanup (see below). Only case we worry about is Memory.
518    if (Hi == Memory)
519      Lo = Memory;
520    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
521  } else if (const RecordType *RT = Ty->getAsRecordType()) {
522    uint64_t Size = Context.getTypeSize(Ty);
523
524    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
525    // than two eightbytes, ..., it has class MEMORY.
526    if (Size > 128)
527      return;
528
529    const RecordDecl *RD = RT->getDecl();
530
531    // Assume variable sized types are passed in memory.
532    if (RD->hasFlexibleArrayMember())
533      return;
534
535    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
536
537    // Reset Lo class, this will be recomputed.
538    Current = NoClass;
539    unsigned idx = 0;
540    for (RecordDecl::field_iterator i = RD->field_begin(),
541           e = RD->field_end(); i != e; ++i, ++idx) {
542      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
543
544      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
545      // fields, it has class MEMORY.
546      if (Offset % Context.getTypeAlign(i->getType())) {
547        Lo = Memory;
548        return;
549      }
550
551      // Classify this field.
552      //
553      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
554      // exceeds a single eightbyte, each is classified
555      // separately. Each eightbyte gets initialized to class
556      // NO_CLASS.
557      Class FieldLo, FieldHi;
558      classify(i->getType(), Context, Offset, FieldLo, FieldHi);
559      Lo = merge(Lo, FieldLo);
560      Hi = merge(Hi, FieldHi);
561      if (Lo == Memory || Hi == Memory)
562        break;
563    }
564
565    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
566    //
567    // (a) If one of the classes is MEMORY, the whole argument is
568    // passed in memory.
569    //
570    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
571
572    // The first of these conditions is guaranteed by how we implement
573    // the merge (just bail).
574    //
575    // The second condition occurs in the case of unions; for example
576    // union { _Complex double cd; unsigned u; }.
577    if (Hi == Memory)
578      Lo = Memory;
579    if (Hi == SSEUp && Lo != SSE)
580      Hi = SSE;
581  }
582}
583
584
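// For illustration (a sketch of the return lowering below):
//   struct { int a; int b; double d; }  -> coerced to { i64, double }
//   long double                         -> coerced to x86_fp80
//   struct { char buf[32]; }            -> returned via sret (class MEMORY)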
585ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
586                                            ASTContext &Context) const {
587  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
588  // classification algorithm.
589  X86_64ABIInfo::Class Lo, Hi;
590  classify(RetTy, Context, 0, Lo, Hi);
591
592  // Check some invariants.
593  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
594  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
595  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
596
597  const llvm::Type *ResType = 0;
598  switch (Lo) {
599  case NoClass:
600    return ABIArgInfo::getIgnore();
601
602  case SSEUp:
603  case X87Up:
604    assert(0 && "Invalid classification for lo word.");
605
606    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
607    // hidden argument, i.e. structret.
608  case Memory:
609    return ABIArgInfo::getStructRet();
610
611    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
612    // available register of the sequence %rax, %rdx is used.
613  case Integer:
614    ResType = llvm::Type::Int64Ty; break;
615
616    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
617    // available SSE register of the sequence %xmm0, %xmm1 is used.
618  case SSE:
619    ResType = llvm::Type::DoubleTy; break;
620
621    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
622    // returned on the X87 stack in %st0 as 80-bit x87 number.
623  case X87:
624    ResType = llvm::Type::X86_FP80Ty; break;
625
626    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
627    // part of the value is returned in %st0 and the imaginary part in
628    // %st1.
629  case ComplexX87:
630    assert(Hi == NoClass && "Unexpected ComplexX87 classification.");
631    ResType = llvm::VectorType::get(llvm::Type::X86_FP80Ty, 2);
632    break;
633  }
634
635  switch (Hi) {
636    // Memory was handled previously, and ComplexX87 and X87 should
637    // never occur as hi classes.
638  case Memory:
639  case X87:
640  case ComplexX87:
641    assert(0 && "Invalid classification for hi word.");
642
643  case NoClass: break;
644  case Integer:
645    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
646    break;
647  case SSE:
648    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
649    break;
650
651    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
652    // is passed in the upper half of the last used SSE register.
653    //
654    // SSEUP should always be preceded by SSE, just widen.
655  case SSEUp:
656    assert(Lo == SSE && "Unexpected SSEUp classification.");
657    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
658    break;
659
660    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
661    // returned together with the previous X87 value in %st0.
662    //
663    // X87UP should always be preceded by X87, so we don't need to do
664    // anything here.
665  case X87Up:
666    assert(Lo == X87 && "Unexpected X87Up classification.");
667    break;
668  }
669
670  return ABIArgInfo::getCoerce(ResType);
671}
672
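// For illustration (a sketch of the argument lowering below): passing seven
// 'struct { long x; }' arguments needs one integer register each; the first six
// are coerced to i64 (the %rdi ... %r9 sequence) and the seventh, with no free
// register left, falls back to being passed in memory (byval).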
673ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
674                                               unsigned &freeIntRegs,
675                                               unsigned &freeSSERegs) const {
676  X86_64ABIInfo::Class Lo, Hi;
677  classify(Ty, Context, 0, Lo, Hi);
678
679  // Check some invariants.
680  // FIXME: Enforce these by construction.
681  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
682  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
683  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
684
685  unsigned neededInt = 0, neededSSE = 0;
686  const llvm::Type *ResType = 0;
687  switch (Lo) {
688  case NoClass:
689    return ABIArgInfo::getIgnore();
690
691    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
692    // on the stack.
693  case Memory:
694
695    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
696    // COMPLEX_X87, it is passed in memory.
697  case X87:
698  case ComplexX87:
699    // Choose appropriate in memory type.
700    if (CodeGenFunction::hasAggregateLLVMType(Ty))
701      return ABIArgInfo::getByVal(0);
702    else
703      return ABIArgInfo::getDirect();
704
705  case SSEUp:
706  case X87Up:
707    assert(0 && "Invalid classification for lo word.");
708
709    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
710    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
711    // and %r9 is used.
712  case Integer:
713    ++neededInt;
714    ResType = llvm::Type::Int64Ty;
715    break;
716
717    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
718    // available SSE register is used, the registers are taken in the
719    // order from %xmm0 to %xmm7.
720  case SSE:
721    ++neededSSE;
722    ResType = llvm::Type::DoubleTy;
723    break;
724  }
725
726  switch (Hi) {
727    // Memory was handled previously, ComplexX87 and X87 should
728    // never occur as hi classes, and X87Up must be preceded by X87,
729    // which is passed in memory.
730  case Memory:
731  case X87:
732  case X87Up:
733  case ComplexX87:
734    assert(0 && "Invalid classification for hi word.");
735
736  case NoClass: break;
737  case Integer:
738    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
739    ++neededInt;
740    break;
741  case SSE:
742    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
743    ++neededSSE;
744    break;
745
746    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
747    // eightbyte is passed in the upper half of the last used SSE
748    // register.
749  case SSEUp:
750    assert(Lo == SSE && "Unexpected SSEUp classification.");
751    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
752    break;
753  }
754
755  // AMD64-ABI 3.2.3p3: If there are no registers available for any
756  // eightbyte of an argument, the whole argument is passed on the
757  // stack. If registers have already been assigned for some
758  // eightbytes of such an argument, the assignments get reverted.
759  if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
760    freeIntRegs -= neededInt;
761    freeSSERegs -= neededSSE;
762    return ABIArgInfo::getCoerce(ResType);
763  } else {
764    // Choose appropriate in memory type.
765    if (CodeGenFunction::hasAggregateLLVMType(Ty))
766      return ABIArgInfo::getByVal(0);
767    else
768      return ABIArgInfo::getDirect();
769  }
770}
771
772void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
773  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
774
775  // Keep track of the number of assigned registers.
776  unsigned freeIntRegs = 6, freeSSERegs = 8;
777
778  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
779  // get assigned (in left-to-right order) for passing as follows...
780  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
781       it != ie; ++it)
782    it->info = classifyArgumentType(it->type, Context, freeIntRegs, freeSSERegs);
783}
784
785ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
786                                            ASTContext &Context) const {
787  if (RetTy->isVoidType()) {
788    return ABIArgInfo::getIgnore();
789  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
790    return ABIArgInfo::getStructRet();
791  } else {
792    return ABIArgInfo::getDirect();
793  }
794}
795
796ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
797                                              ASTContext &Context) const {
798  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
799    return ABIArgInfo::getByVal(0);
800  } else {
801    return ABIArgInfo::getDirect();
802  }
803}
804
805const ABIInfo &CodeGenTypes::getABIInfo() const {
806  if (TheABIInfo)
807    return *TheABIInfo;
808
809  // For now we just cache this in the CodeGenTypes and don't bother
810  // to free it.
811  const char *TargetPrefix = getContext().Target.getTargetPrefix();
812  if (strcmp(TargetPrefix, "x86") == 0) {
813    switch (getContext().Target.getPointerWidth(0)) {
814    case 32:
815      return *(TheABIInfo = new X86_32ABIInfo());
816    case 64:
817      return *(TheABIInfo = new X86_64ABIInfo());
818    }
819  }
820
821  return *(TheABIInfo = new DefaultABIInfo);
822}
823
824/***/
825
826CGFunctionInfo::CGFunctionInfo(QualType ResTy,
827                               const llvm::SmallVector<QualType, 16> &ArgTys) {
828  NumArgs = ArgTys.size();
829  Args = new ArgInfo[1 + NumArgs];
830  Args[0].type = ResTy;
831  for (unsigned i = 0; i < NumArgs; ++i)
832    Args[1 + i].type = ArgTys[i];
833}
834
835/***/
836
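// For illustration (a sketch): expanding 'struct { int a; struct { float f; } inner; }'
// appends i32 and float to ArgTys; nested aggregates are flattened recursively.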
837void CodeGenTypes::GetExpandedTypes(QualType Ty,
838                                    std::vector<const llvm::Type*> &ArgTys) {
839  const RecordType *RT = Ty->getAsStructureType();
840  assert(RT && "Can only expand structure types.");
841  const RecordDecl *RD = RT->getDecl();
842  assert(!RD->hasFlexibleArrayMember() &&
843         "Cannot expand structure with flexible array.");
844
845  for (RecordDecl::field_iterator i = RD->field_begin(),
846         e = RD->field_end(); i != e; ++i) {
847    const FieldDecl *FD = *i;
848    assert(!FD->isBitField() &&
849           "Cannot expand structure with bit-field members.");
850
851    QualType FT = FD->getType();
852    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
853      GetExpandedTypes(FT, ArgTys);
854    } else {
855      ArgTys.push_back(ConvertType(FT));
856    }
857  }
858}
859
860llvm::Function::arg_iterator
861CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
862                                    llvm::Function::arg_iterator AI) {
863  const RecordType *RT = Ty->getAsStructureType();
864  assert(RT && "Can only expand structure types.");
865
866  RecordDecl *RD = RT->getDecl();
867  assert(LV.isSimple() &&
868         "Unexpected non-simple lvalue during struct expansion.");
869  llvm::Value *Addr = LV.getAddress();
870  for (RecordDecl::field_iterator i = RD->field_begin(),
871         e = RD->field_end(); i != e; ++i) {
872    FieldDecl *FD = *i;
873    QualType FT = FD->getType();
874
875    // FIXME: What are the right qualifiers here?
876    LValue LV = EmitLValueForField(Addr, FD, false, 0);
877    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
878      AI = ExpandTypeFromArgs(FT, LV, AI);
879    } else {
880      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
881      ++AI;
882    }
883  }
884
885  return AI;
886}
887
888void
889CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
890                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
891  const RecordType *RT = Ty->getAsStructureType();
892  assert(RT && "Can only expand structure types.");
893
894  RecordDecl *RD = RT->getDecl();
895  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
896  llvm::Value *Addr = RV.getAggregateAddr();
897  for (RecordDecl::field_iterator i = RD->field_begin(),
898         e = RD->field_end(); i != e; ++i) {
899    FieldDecl *FD = *i;
900    QualType FT = FD->getType();
901
902    // FIXME: What are the right qualifiers here?
903    LValue LV = EmitLValueForField(Addr, FD, false, 0);
904    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
905      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
906    } else {
907      RValue RV = EmitLoadOfLValue(LV, FT);
908      assert(RV.isScalar() &&
909             "Unexpected non-scalar rvalue during struct expansion.");
910      Args.push_back(RV.getScalarVal());
911    }
912  }
913}
914
915/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
916/// a pointer to an object of type \arg Ty.
917///
918/// This safely handles the case when the src type is smaller than the
919/// destination type; in this situation the values of bits which are not
920/// present in the src are undefined.
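///
/// For illustration (a sketch): loading an i64 from a { float, i32 }* (same
/// 8-byte size) is just a bitcast plus load, while loading an i64 from an i32*
/// (smaller source) goes through a stack temporary, leaving the upper 32 bits
/// undefined instead of reading past the source object.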
921static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
922                                      const llvm::Type *Ty,
923                                      CodeGenFunction &CGF) {
924  const llvm::Type *SrcTy =
925    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
926  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
927  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty);
928
929  // If load is legal, just bitcast the src pointer.
930  if (SrcSize == DstSize) {
931    llvm::Value *Casted =
932      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
933    return CGF.Builder.CreateLoad(Casted);
934  } else {
935    assert(SrcSize < DstSize && "Coercion is losing source bits!");
936
937    // Otherwise do coercion through memory. This is stupid, but
938    // simple.
939    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
940    llvm::Value *Casted =
941      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
942    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
943    return CGF.Builder.CreateLoad(Tmp);
944  }
945}
946
947/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
948/// where the source and destination may have different types.
949///
950/// This safely handles the case when the src type is larger than the
951/// destination type; the upper bits of the src will be lost.
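///
/// For illustration (a sketch): storing an i64 into an i32-sized destination
/// goes through a stack temporary, and only the low 32 bits (on a little-endian
/// target such as x86) reach DstPtr.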
952static void CreateCoercedStore(llvm::Value *Src,
953                               llvm::Value *DstPtr,
954                               CodeGenFunction &CGF) {
955  const llvm::Type *SrcTy = Src->getType();
956  const llvm::Type *DstTy =
957    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
958
959  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
960  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy);
961
962  // If store is legal, just bitcast the dst pointer.
963  if (SrcSize == DstSize) {
964    llvm::Value *Casted =
965      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
966    CGF.Builder.CreateStore(Src, Casted);
967  } else {
968    assert(SrcSize > DstSize && "Coercion is missing bits!");
969
970    // Otherwise do coercion through memory. This is stupid, but
971    // simple.
972    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
973    CGF.Builder.CreateStore(Src, Tmp);
974    llvm::Value *Casted =
975      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
976    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(Casted), DstPtr);
977  }
978}
979
980/***/
981
982bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
983  return FI.getReturnInfo().isStructRet();
984}
985
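// For illustration (a sketch, x86-32): for 'struct S { int a, b, c; } f(int);'
// the sret return gives the LLVM signature 'void (%struct.S*, i32)', with the
// hidden result pointer prepended to the argument list.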
986const llvm::FunctionType *
987CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
988  std::vector<const llvm::Type*> ArgTys;
989
990  const llvm::Type *ResultType = 0;
991
992  QualType RetTy = FI.getReturnType();
993  const ABIArgInfo &RetAI = FI.getReturnInfo();
994  switch (RetAI.getKind()) {
995  case ABIArgInfo::ByVal:
996  case ABIArgInfo::Expand:
997    assert(0 && "Invalid ABI kind for return argument");
998
999  case ABIArgInfo::Direct:
1000    ResultType = ConvertType(RetTy);
1001    break;
1002
1003  case ABIArgInfo::StructRet: {
1004    ResultType = llvm::Type::VoidTy;
1005    const llvm::Type *STy = ConvertType(RetTy);
1006    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
1007    break;
1008  }
1009
1010  case ABIArgInfo::Ignore:
1011    ResultType = llvm::Type::VoidTy;
1012    break;
1013
1014  case ABIArgInfo::Coerce:
1015    ResultType = RetAI.getCoerceToType();
1016    break;
1017  }
1018
1019  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1020         ie = FI.arg_end(); it != ie; ++it) {
1021    const ABIArgInfo &AI = it->info;
1022    const llvm::Type *Ty = ConvertType(it->type);
1023
1024    switch (AI.getKind()) {
1025    case ABIArgInfo::Ignore:
1026      break;
1027
1028    case ABIArgInfo::Coerce:
1029      ArgTys.push_back(AI.getCoerceToType());
1030      break;
1031
1032    case ABIArgInfo::StructRet:
1033      assert(0 && "Invalid ABI kind for non-return argument");
1034
1035    case ABIArgInfo::ByVal:
1036      // byval arguments are always on the stack, which is addr space #0.
1037      ArgTys.push_back(llvm::PointerType::getUnqual(Ty));
1038      assert(AI.getByValAlignment() == 0 && "FIXME: alignment unhandled");
1039      break;
1040
1041    case ABIArgInfo::Direct:
1042      ArgTys.push_back(Ty);
1043      break;
1044
1045    case ABIArgInfo::Expand:
1046      GetExpandedTypes(it->type, ArgTys);
1047      break;
1048    }
1049  }
1050
1051  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
1052}
1053
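// For illustration (a sketch): with an sret return, parameter 1 gets
// 'sret noalias' and the user parameters start at index 2; a promotable signed
// integer return instead gets 'signext' recorded at index 0 (the return slot).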
1054void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
1055                                           const Decl *TargetDecl,
1056                                           AttributeListType &PAL) {
1057  unsigned FuncAttrs = 0;
1058  unsigned RetAttrs = 0;
1059
1060  if (TargetDecl) {
1061    if (TargetDecl->getAttr<NoThrowAttr>())
1062      FuncAttrs |= llvm::Attribute::NoUnwind;
1063    if (TargetDecl->getAttr<NoReturnAttr>())
1064      FuncAttrs |= llvm::Attribute::NoReturn;
1065    if (TargetDecl->getAttr<PureAttr>())
1066      FuncAttrs |= llvm::Attribute::ReadOnly;
1067    if (TargetDecl->getAttr<ConstAttr>())
1068      FuncAttrs |= llvm::Attribute::ReadNone;
1069  }
1070
1071  QualType RetTy = FI.getReturnType();
1072  unsigned Index = 1;
1073  const ABIArgInfo &RetAI = FI.getReturnInfo();
1074  switch (RetAI.getKind()) {
1075  case ABIArgInfo::Direct:
1076    if (RetTy->isPromotableIntegerType()) {
1077      if (RetTy->isSignedIntegerType()) {
1078        RetAttrs |= llvm::Attribute::SExt;
1079      } else if (RetTy->isUnsignedIntegerType()) {
1080        RetAttrs |= llvm::Attribute::ZExt;
1081      }
1082    }
1083    break;
1084
1085  case ABIArgInfo::StructRet:
1086    PAL.push_back(llvm::AttributeWithIndex::get(Index,
1087                                                llvm::Attribute::StructRet |
1088                                                llvm::Attribute::NoAlias));
1089    ++Index;
1090    break;
1091
1092  case ABIArgInfo::Ignore:
1093  case ABIArgInfo::Coerce:
1094    break;
1095
1096  case ABIArgInfo::ByVal:
1097  case ABIArgInfo::Expand:
1098    assert(0 && "Invalid ABI kind for return argument");
1099  }
1100
1101  if (RetAttrs)
1102    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
1103  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1104         ie = FI.arg_end(); it != ie; ++it) {
1105    QualType ParamType = it->type;
1106    const ABIArgInfo &AI = it->info;
1107    unsigned Attributes = 0;
1108
1109    switch (AI.getKind()) {
1110    case ABIArgInfo::StructRet:
1111      assert(0 && "Invalid ABI kind for non-return argument");
1112
1113    case ABIArgInfo::Coerce:
1114      break;
1115
1116    case ABIArgInfo::ByVal:
1117      Attributes |= llvm::Attribute::ByVal;
1118      assert(AI.getByValAlignment() == 0 && "FIXME: alignment unhandled");
1119      break;
1120
1121    case ABIArgInfo::Direct:
1122      if (ParamType->isPromotableIntegerType()) {
1123        if (ParamType->isSignedIntegerType()) {
1124          Attributes |= llvm::Attribute::SExt;
1125        } else if (ParamType->isUnsignedIntegerType()) {
1126          Attributes |= llvm::Attribute::ZExt;
1127        }
1128      }
1129      break;
1130
1131    case ABIArgInfo::Ignore:
1132      // Skip increment, no matching LLVM parameter.
1133      continue;
1134
1135    case ABIArgInfo::Expand: {
1136      std::vector<const llvm::Type*> Tys;
1137      // FIXME: This is rather inefficient. Do we ever actually need
1138      // to do anything here? The result should be just reconstructed
1139      // on the other side, so extension should be a non-issue.
1140      getTypes().GetExpandedTypes(ParamType, Tys);
1141      Index += Tys.size();
1142      continue;
1143    }
1144    }
1145
1146    if (Attributes)
1147      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
1148    ++Index;
1149  }
1150  if (FuncAttrs)
1151    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
1152
1153}
1154
1155void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1156                                         llvm::Function *Fn,
1157                                         const FunctionArgList &Args) {
1158  // FIXME: We no longer need the types from FunctionArgList; lift up
1159  // and simplify.
1160
1161  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
1162  llvm::Function::arg_iterator AI = Fn->arg_begin();
1163
1164  // Name the struct return argument.
1165  if (CGM.ReturnTypeUsesSret(FI)) {
1166    AI->setName("agg.result");
1167    ++AI;
1168  }
1169
1170  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1171  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1172       i != e; ++i, ++info_it) {
1173    const VarDecl *Arg = i->first;
1174    QualType Ty = info_it->type;
1175    const ABIArgInfo &ArgI = info_it->info;
1176
1177    switch (ArgI.getKind()) {
1178    case ABIArgInfo::ByVal:
1179    case ABIArgInfo::Direct: {
1180      assert(AI != Fn->arg_end() && "Argument mismatch!");
1181      llvm::Value* V = AI;
1182      if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
1183        // This must be a promotion, for something like
1184        // "void a(x) short x; {..."
1185        V = EmitScalarConversion(V, Ty, Arg->getType());
1186      }
1187      EmitParmDecl(*Arg, V);
1188      break;
1189    }
1190
1191    case ABIArgInfo::Expand: {
1192      // If this structure was expanded into multiple arguments then
1193      // we need to create a temporary and reconstruct it from the
1194      // arguments.
1195      std::string Name = Arg->getNameAsString();
1196      llvm::Value *Temp = CreateTempAlloca(ConvertType(Ty),
1197                                           (Name + ".addr").c_str());
1198      // FIXME: What are the right qualifiers here?
1199      llvm::Function::arg_iterator End =
1200        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
1201      EmitParmDecl(*Arg, Temp);
1202
1203      // Name the arguments used in expansion and increment AI.
1204      unsigned Index = 0;
1205      for (; AI != End; ++AI, ++Index)
1206        AI->setName(Name + "." + llvm::utostr(Index));
1207      continue;
1208    }
1209
1210    case ABIArgInfo::Ignore:
1211      // Skip increment, no matching LLVM parameter.
1212      continue;
1213
1214    case ABIArgInfo::Coerce: {
1215      assert(AI != Fn->arg_end() && "Argument mismatch!");
1216      // FIXME: This is very wasteful; EmitParmDecl is just going to
1217      // drop the result in a new alloca anyway, so we could just
1218      // store into that directly if we broke the abstraction down
1219      // more.
1220      llvm::Value *V = CreateTempAlloca(ConvertType(Ty), "coerce");
1221      CreateCoercedStore(AI, V, *this);
1222      // Match to what EmitParmDecl is expecting for this type.
1223      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1224        V = Builder.CreateLoad(V);
1225        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
1226          // This must be a promotion, for something like
1227          // "void a(x) short x; {..."
1228          V = EmitScalarConversion(V, Ty, Arg->getType());
1229        }
1230      }
1231      EmitParmDecl(*Arg, V);
1232      break;
1233    }
1234
1235    case ABIArgInfo::StructRet:
1236      assert(0 && "Invalid ABI kind for non-return argument");
1237    }
1238
1239    ++AI;
1240  }
1241  assert(AI == Fn->arg_end() && "Argument mismatch!");
1242}
1243
1244void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
1245                                         llvm::Value *ReturnValue) {
1246  llvm::Value *RV = 0;
1247
1248  // Functions with no result always return void.
1249  if (ReturnValue) {
1250    QualType RetTy = FI.getReturnType();
1251    const ABIArgInfo &RetAI = FI.getReturnInfo();
1252
1253    switch (RetAI.getKind()) {
1254    case ABIArgInfo::StructRet:
1255      if (RetTy->isAnyComplexType()) {
1256        // FIXME: Volatile
1257        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1258        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1259      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1260        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
1261      } else {
1262        Builder.CreateStore(Builder.CreateLoad(ReturnValue),
1263                            CurFn->arg_begin());
1264      }
1265      break;
1266
1267    case ABIArgInfo::Direct:
1268      RV = Builder.CreateLoad(ReturnValue);
1269      break;
1270
1271    case ABIArgInfo::Ignore:
1272      break;
1273
1274    case ABIArgInfo::Coerce: {
1275      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
1276      break;
1277    }
1278
1279    case ABIArgInfo::ByVal:
1280    case ABIArgInfo::Expand:
1281      assert(0 && "Invalid ABI kind for return argument");
1282    }
1283  }
1284
1285  if (RV) {
1286    Builder.CreateRet(RV);
1287  } else {
1288    Builder.CreateRetVoid();
1289  }
1290}
1291
1292RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1293                                 llvm::Value *Callee,
1294                                 const CallArgList &CallArgs) {
1295  // FIXME: We no longer need the types from CallArgs; lift up and
1296  // simplify.
1297  llvm::SmallVector<llvm::Value*, 16> Args;
1298
1299  // Handle struct-return functions by passing a pointer to the
1300  // location that we would like to return into.
1301  QualType RetTy = CallInfo.getReturnType();
1302  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1303  switch (RetAI.getKind()) {
1304  case ABIArgInfo::StructRet:
1305    // Create a temporary alloca to hold the result of the call. :(
1306    Args.push_back(CreateTempAlloca(ConvertType(RetTy)));
1307    break;
1308
1309  case ABIArgInfo::Direct:
1310  case ABIArgInfo::Ignore:
1311  case ABIArgInfo::Coerce:
1312    break;
1313
1314  case ABIArgInfo::ByVal:
1315  case ABIArgInfo::Expand:
1316    assert(0 && "Invalid ABI kind for return argument");
1317  }
1318
1319  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1320  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1321       I != E; ++I, ++info_it) {
1322    const ABIArgInfo &ArgInfo = info_it->info;
1323    RValue RV = I->first;
1324
1325    switch (ArgInfo.getKind()) {
1326    case ABIArgInfo::ByVal: // ByVal is emitted the same way as Direct here.
1327    case ABIArgInfo::Direct:
1328      if (RV.isScalar()) {
1329        Args.push_back(RV.getScalarVal());
1330      } else if (RV.isComplex()) {
1331        // Make a temporary alloca to pass the argument.
1332        Args.push_back(CreateTempAlloca(ConvertType(I->second)));
1333        StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1334      } else {
1335        Args.push_back(RV.getAggregateAddr());
1336      }
1337      break;
1338
1339    case ABIArgInfo::Ignore:
1340      break;
1341
1342    case ABIArgInfo::Coerce: {
1343      // FIXME: Avoid the conversion through memory if possible.
1344      llvm::Value *SrcPtr;
1345      if (RV.isScalar()) {
1346        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
1347        Builder.CreateStore(RV.getScalarVal(), SrcPtr);
1348      } else if (RV.isComplex()) {
1349        SrcPtr = CreateTempAlloca(ConvertType(I->second), "coerce");
1350        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
1351      } else
1352        SrcPtr = RV.getAggregateAddr();
1353      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
1354                                       *this));
1355      break;
1356    }
1357
1358    case ABIArgInfo::StructRet:
1359      assert(0 && "Invalid ABI kind for non-return argument");
1360      break;
1361
1362    case ABIArgInfo::Expand:
1363      ExpandTypeToArgs(I->second, RV, Args);
1364      break;
1365    }
1366  }
1367
1368  llvm::CallInst *CI = Builder.CreateCall(Callee,&Args[0],&Args[0]+Args.size());
1369
1370  // FIXME: Provide TargetDecl so nounwind, noreturn, etc, etc get set.
1371  CodeGen::AttributeListType AttributeList;
1372  CGM.ConstructAttributeList(CallInfo, 0, AttributeList);
1373  CI->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
1374                                           AttributeList.size()));
1375
1376  if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
1377    CI->setCallingConv(F->getCallingConv());
1378  if (CI->getType() != llvm::Type::VoidTy)
1379    CI->setName("call");
1380
1381  switch (RetAI.getKind()) {
1382  case ABIArgInfo::StructRet:
1383    if (RetTy->isAnyComplexType())
1384      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
1385    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1386      return RValue::getAggregate(Args[0]);
1387    else
1388      return RValue::get(Builder.CreateLoad(Args[0]));
1389
1390  case ABIArgInfo::Direct:
1391    assert((!RetTy->isAnyComplexType() &&
1392            !CodeGenFunction::hasAggregateLLVMType(RetTy)) &&
1393           "FIXME: Implement return for non-scalar direct types.");
1394    return RValue::get(CI);
1395
1396  case ABIArgInfo::Ignore:
1397    if (RetTy->isVoidType())
1398      return RValue::get(0);
1399
1400    // If the return value is being ignored but the type is non-void, make
1401    // sure to construct an appropriate (undef) return value for our caller.
1402    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1403      llvm::Value *Res =
1404        llvm::UndefValue::get(llvm::PointerType::getUnqual(ConvertType(RetTy)));
1405      return RValue::getAggregate(Res);
1406    }
1407    return RValue::get(llvm::UndefValue::get(ConvertType(RetTy)));
1408
1409  case ABIArgInfo::Coerce: {
1410    // FIXME: Avoid the conversion through memory if possible.
1411    llvm::Value *V = CreateTempAlloca(ConvertType(RetTy), "coerce");
1412    CreateCoercedStore(CI, V, *this);
1413    if (RetTy->isAnyComplexType())
1414      return RValue::getComplex(LoadComplexFromAddr(V, false));
1415    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1416      return RValue::getAggregate(V);
1417    else
1418      return RValue::get(Builder.CreateLoad(V));
1419  }
1420
1421  case ABIArgInfo::ByVal:
1422  case ABIArgInfo::Expand:
1423    assert(0 && "Invalid ABI kind for return argument");
1424  }
1425
1426  assert(0 && "Unhandled ABIArgInfo::Kind");
1427  return RValue::get(0);
1428}
1429