//===----- CGCall.cpp - Encapsulate calling convention details --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
// (Snapshot of revision 99037e5a2118bc194251a8033a0885810bf61c95.)
//
//===----------------------------------------------------------------------===//
14f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
15f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "CGCall.h"
16f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "CodeGenFunction.h"
17f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "CodeGenModule.h"
18f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "clang/Basic/TargetInfo.h"
19f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "clang/AST/ASTContext.h"
20f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "clang/AST/Decl.h"
21f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "clang/AST/DeclObjC.h"
22f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "clang/AST/RecordLayout.h"
23f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "llvm/ADT/StringExtras.h"
24f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "llvm/Attributes.h"
25f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "llvm/Support/CommandLine.h"
26f4a0148381866692a1177a49620854c90294e0a8Greg Clayton#include "llvm/Target/TargetData.h"
27f4a0148381866692a1177a49620854c90294e0a8Greg Claytonusing namespace clang;
28f4a0148381866692a1177a49620854c90294e0a8Greg Claytonusing namespace CodeGen;
29f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
30f4a0148381866692a1177a49620854c90294e0a8Greg Claytonstatic llvm::cl::opt<bool>
31f4a0148381866692a1177a49620854c90294e0a8Greg ClaytonUseX86_64ABI("use-x86_64-abi",
32f4a0148381866692a1177a49620854c90294e0a8Greg Clayton           llvm::cl::desc("Enable use of experimental x86_64 ABI."),
33f4a0148381866692a1177a49620854c90294e0a8Greg Clayton           llvm::cl::init(false));
34f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
35f4a0148381866692a1177a49620854c90294e0a8Greg Clayton/***/
36f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
37f4a0148381866692a1177a49620854c90294e0a8Greg Clayton// FIXME: Use iterator and sidestep silly type array creation.
38f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
39f4a0148381866692a1177a49620854c90294e0a8Greg ClaytonCGFunctionInfo::CGFunctionInfo(const FunctionTypeNoProto *FTNP)
40f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  : IsVariadic(true)
41f4a0148381866692a1177a49620854c90294e0a8Greg Clayton{
42f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  ArgTypes.push_back(FTNP->getResultType());
43f4a0148381866692a1177a49620854c90294e0a8Greg Clayton}
44f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
45f4a0148381866692a1177a49620854c90294e0a8Greg ClaytonCGFunctionInfo::CGFunctionInfo(const FunctionTypeProto *FTP)
46f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  : IsVariadic(FTP->isVariadic())
47f4a0148381866692a1177a49620854c90294e0a8Greg Clayton{
48f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  ArgTypes.push_back(FTP->getResultType());
49f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
50f4a0148381866692a1177a49620854c90294e0a8Greg Clayton    ArgTypes.push_back(FTP->getArgType(i));
51f4a0148381866692a1177a49620854c90294e0a8Greg Clayton}
52f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
53f4a0148381866692a1177a49620854c90294e0a8Greg Clayton// FIXME: Is there really any reason to have this still?
54f4a0148381866692a1177a49620854c90294e0a8Greg ClaytonCGFunctionInfo::CGFunctionInfo(const FunctionDecl *FD)
55f4a0148381866692a1177a49620854c90294e0a8Greg Clayton{
56f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  const FunctionType *FTy = FD->getType()->getAsFunctionType();
57f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  const FunctionTypeProto *FTP = dyn_cast<FunctionTypeProto>(FTy);
58f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
59f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  ArgTypes.push_back(FTy->getResultType());
60f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  if (FTP) {
61f4a0148381866692a1177a49620854c90294e0a8Greg Clayton    IsVariadic = FTP->isVariadic();
62f4a0148381866692a1177a49620854c90294e0a8Greg Clayton    for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
63f4a0148381866692a1177a49620854c90294e0a8Greg Clayton      ArgTypes.push_back(FTP->getArgType(i));
64f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  } else {
65f4a0148381866692a1177a49620854c90294e0a8Greg Clayton    IsVariadic = true;
66f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  }
67f4a0148381866692a1177a49620854c90294e0a8Greg Clayton}
68f4a0148381866692a1177a49620854c90294e0a8Greg Clayton
69f4a0148381866692a1177a49620854c90294e0a8Greg ClaytonCGFunctionInfo::CGFunctionInfo(const ObjCMethodDecl *MD,
70f4a0148381866692a1177a49620854c90294e0a8Greg Clayton                               const ASTContext &Context)
71f4a0148381866692a1177a49620854c90294e0a8Greg Clayton  : IsVariadic(MD->isVariadic())
729f39a7b47cd7633397c8cc6fb0f141343173dd44Johnny Chen{
73  ArgTypes.push_back(MD->getResultType());
74  ArgTypes.push_back(MD->getSelfDecl()->getType());
75  ArgTypes.push_back(Context.getObjCSelType());
76  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
77         e = MD->param_end(); i != e; ++i)
78    ArgTypes.push_back((*i)->getType());
79}
80
/// argtypes_begin - Iterator over the result type (first element)
/// followed by the argument types.
ArgTypeIterator CGFunctionInfo::argtypes_begin() const {
  return ArgTypes.begin();
}
84
/// argtypes_end - End iterator for the result/argument type sequence.
ArgTypeIterator CGFunctionInfo::argtypes_end() const {
  return ArgTypes.end();
}
88
89/***/
90
91CGCallInfo::CGCallInfo(QualType _ResultType, const CallArgList &_Args) {
92  ArgTypes.push_back(_ResultType);
93  for (CallArgList::const_iterator i = _Args.begin(), e = _Args.end(); i!=e; ++i)
94    ArgTypes.push_back(i->second);
95}
96
/// argtypes_begin - Iterator over the result type (first element)
/// followed by the call argument types.
ArgTypeIterator CGCallInfo::argtypes_begin() const {
  return ArgTypes.begin();
}
100
/// argtypes_end - End iterator for the result/argument type sequence.
ArgTypeIterator CGCallInfo::argtypes_end() const {
  return ArgTypes.end();
}
104
105/***/
106
107/// ABIArgInfo - Helper class to encapsulate information about how a
108/// specific C type should be passed to or returned from a function.
109class ABIArgInfo {
110public:
111  enum Kind {
112    Default,
113    StructRet, /// Only valid for return values. The return value
114               /// should be passed through a pointer to a caller
115               /// allocated location passed as an implicit first
116               /// argument to the function.
117
118    Ignore,    /// Ignore the argument (treat as void). Useful for
119               /// void and empty structs.
120
121    Coerce,    /// Only valid for aggregate return types, the argument
122               /// should be accessed by coercion to a provided type.
123
124    ByVal,     /// Only valid for aggregate argument types. The
125               /// structure should be passed "byval" with the
126               /// specified alignment (0 indicates default
127               /// alignment).
128
129    Expand,    /// Only valid for aggregate argument types. The
130               /// structure should be expanded into consecutive
131               /// arguments for its constituent fields. Currently
132               /// expand is only allowed on structures whose fields
133               /// are all scalar types or are themselves expandable
134               /// types.
135
136    KindFirst=Default, KindLast=Expand
137  };
138
139private:
140  Kind TheKind;
141  const llvm::Type *TypeData;
142  unsigned UIntData;
143
144  ABIArgInfo(Kind K, const llvm::Type *TD=0,
145             unsigned UI=0) : TheKind(K),
146                              TypeData(TD),
147                              UIntData(0) {}
148public:
149  static ABIArgInfo getDefault() {
150    return ABIArgInfo(Default);
151  }
152  static ABIArgInfo getStructRet() {
153    return ABIArgInfo(StructRet);
154  }
155  static ABIArgInfo getIgnore() {
156    return ABIArgInfo(Ignore);
157  }
158  static ABIArgInfo getCoerce(const llvm::Type *T) {
159    return ABIArgInfo(Coerce, T);
160  }
161  static ABIArgInfo getByVal(unsigned Alignment) {
162    return ABIArgInfo(ByVal, 0, Alignment);
163  }
164  static ABIArgInfo getExpand() {
165    return ABIArgInfo(Expand);
166  }
167
168  Kind getKind() const { return TheKind; }
169  bool isDefault() const { return TheKind == Default; }
170  bool isStructRet() const { return TheKind == StructRet; }
171  bool isIgnore() const { return TheKind == Ignore; }
172  bool isCoerce() const { return TheKind == Coerce; }
173  bool isByVal() const { return TheKind == ByVal; }
174  bool isExpand() const { return TheKind == Expand; }
175
176  // Coerce accessors
177  const llvm::Type *getCoerceToType() const {
178    assert(TheKind == Coerce && "Invalid kind!");
179    return TypeData;
180  }
181
182  // ByVal accessors
183  unsigned getByValAlignment() const {
184    assert(TheKind == ByVal && "Invalid kind!");
185    return UIntData;
186  }
187};
188
189/***/
190
191/* FIXME: All of this stuff should be part of the target interface
192   somehow. It is currently here because it is not clear how to factor
193   the targets to support this, since the Targets currently live in a
194   layer below types n'stuff.
195 */
196
/// ABIInfo - Target specific hooks for defining how a type should be
/// passed or returned from functions.
class clang::ABIInfo {
public:
  virtual ~ABIInfo();

  /// classifyReturnType - Decide how \arg RetTy should be returned
  /// from a function.
  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const = 0;

  /// classifyArgumentType - Decide how \arg Ty should be passed as an
  /// argument.
  virtual ABIArgInfo classifyArgumentType(QualType Ty,
                                          ASTContext &Context) const = 0;
};
209
// Out-of-line definition of the virtual destructor.
ABIInfo::~ABIInfo() {}
211
212/// isEmptyStruct - Return true iff a structure has no non-empty
213/// members. Note that a structure with a flexible array member is not
214/// considered empty.
215static bool isEmptyStruct(QualType T) {
216  const RecordType *RT = T->getAsStructureType();
217  if (!RT)
218    return 0;
219  const RecordDecl *RD = RT->getDecl();
220  if (RD->hasFlexibleArrayMember())
221    return false;
222  for (RecordDecl::field_iterator i = RD->field_begin(),
223         e = RD->field_end(); i != e; ++i) {
224    const FieldDecl *FD = *i;
225    if (!isEmptyStruct(FD->getType()))
226      return false;
227  }
228  return true;
229}
230
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const FieldDecl *isSingleElementStruct(QualType T) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  // Found tracks the unique non-empty field seen so far; a second
  // non-empty field disqualifies the struct.
  const FieldDecl *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    if (isEmptyStruct(FT)) {
      // Ignore
    } else if (Found) {
      // More than one non-empty field.
      return 0;
    } else if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      // A scalar field is a candidate single element.
      Found = FD;
    } else {
      // An aggregate field qualifies only if it is itself a single
      // element struct; recurse to find its element.
      Found = isSingleElementStruct(FT);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
269
270static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
271  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
272    return false;
273
274  uint64_t Size = Context.getTypeSize(Ty);
275  return Size == 32 || Size == 64;
276}
277
/// areAllFields32Or64BitBasicType - Return true if every field of
/// \arg RD is a 32-bit or 64-bit builtin/pointer type (non-recursive;
/// used to decide whether a struct argument can be expanded).
static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
                                           ASTContext &Context) {
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // If this is a bit-field we need to make sure it is still a
    // 32-bit or 64-bit type.
    // NOTE(review): the check below only rejects widths <= 16, so a
    // 24-bit bit-field would pass despite the comment above — confirm
    // whether Width != 32 && Width != 64 was intended.
    if (Expr *BW = FD->getBitWidth()) {
      unsigned Width = BW->getIntegerConstantExprValue(Context).getZExtValue();
      if (Width <= 16)
        return false;
    }
  }
  return true;
}
297
298namespace {
299/// DefaultABIInfo - The default implementation for ABI specific
300/// details. This implementation provides information which results in
301/// sensible LLVM IR generation, but does not conform to any
302/// particular ABI.
303class DefaultABIInfo : public ABIInfo {
304  virtual ABIArgInfo classifyReturnType(QualType RetTy,
305                                        ASTContext &Context) const;
306
307  virtual ABIArgInfo classifyArgumentType(QualType RetTy,
308                                          ASTContext &Context) const;
309};
310
311/// X86_32ABIInfo - The X86-32 ABI information.
312class X86_32ABIInfo : public ABIInfo {
313public:
314  virtual ABIArgInfo classifyReturnType(QualType RetTy,
315                                        ASTContext &Context) const;
316
317  virtual ABIArgInfo classifyArgumentType(QualType RetTy,
318                                          ASTContext &Context) const;
319};
320}
321
/// classifyReturnType - Decide how \arg RetTy is returned on X86-32.
/// Small aggregates are coerced to an integer of the same size;
/// larger aggregates use a hidden struct-return pointer.
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Classify "single element" structs as their element type.
    const FieldDecl *SeltFD = isSingleElementStruct(RetTy);
    if (SeltFD) {
      QualType SeltTy = SeltFD->getType()->getDesugaredType();
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        // FIXME: This is gross, it would be nice if we could just
        // pass back SeltTy and have clients deal with it. Is it worth
        // supporting coerce to both LLVM and clang Types?
        if (BT->isIntegerType()) {
          uint64_t Size = Context.getTypeSize(SeltTy);
          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
        } else if (BT->getKind() == BuiltinType::Double) {
          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as
        // the proper pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
        return ABIArgInfo::getCoerce(PtrTy);
      }
    }

    // Otherwise coerce aggregates of register size to the matching
    // integer type; anything else is returned via sret.
    uint64_t Size = Context.getTypeSize(RetTy);
    if (Size == 8) {
      return ABIArgInfo::getCoerce(llvm::Type::Int8Ty);
    } else if (Size == 16) {
      return ABIArgInfo::getCoerce(llvm::Type::Int16Ty);
    } else if (Size == 32) {
      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
    } else if (Size == 64) {
      return ABIArgInfo::getCoerce(llvm::Type::Int64Ty);
    } else {
      return ABIArgInfo::getStructRet();
    }
  } else {
    // Non-aggregates are returned using the normal conversion.
    return ABIArgInfo::getDefault();
  }
}
366
367ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
368                                              ASTContext &Context) const {
369  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
370    // Structures with flexible arrays are always byval.
371    if (const RecordType *RT = Ty->getAsStructureType())
372      if (RT->getDecl()->hasFlexibleArrayMember())
373        return ABIArgInfo::getByVal(0);
374
375    // Expand empty structs (i.e. ignore)
376    uint64_t Size = Context.getTypeSize(Ty);
377    if (Ty->isStructureType() && Size == 0)
378      return ABIArgInfo::getExpand();
379
380    // Expand structs with size <= 128-bits which consist only of
381    // basic types (int, long long, float, double, xxx*). This is
382    // non-recursive and does not ignore empty fields.
383    if (const RecordType *RT = Ty->getAsStructureType()) {
384      if (Context.getTypeSize(Ty) <= 4*32 &&
385          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
386        return ABIArgInfo::getExpand();
387    }
388
389    return ABIArgInfo::getByVal(0);
390  } else {
391    return ABIArgInfo::getDefault();
392  }
393}
394
395namespace {
396/// X86_32ABIInfo - The X86_64 ABI information.
397class X86_64ABIInfo : public ABIInfo {
398  enum Class {
399    Integer = 0,
400    SSE,
401    SSEUp,
402    X87,
403    X87Up,
404    ComplexX87,
405    NoClass,
406    Memory
407  };
408
409  /// classify - Determine the x86_64 register classes in which the
410  /// given type T should be passed.
411  ///
412  /// \param Lo - The classification for the low word of the type.
413  /// \param Hi - The classification for the high word of the type.
414  ///
415  /// If a word is unused its result will be NoClass; if a type should
416  /// be passed in Memory then at least the classification of \arg Lo
417  /// will be Memory.
418  ///
419  /// The \arg Lo class will be NoClass iff the argument is ignored.
420  ///
421  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
422  /// be NoClass.
423  void classify(QualType T, ASTContext &Context,
424                Class &Lo, Class &Hi) const;
425
426public:
427  virtual ABIArgInfo classifyReturnType(QualType RetTy,
428                                        ASTContext &Context) const;
429
430  virtual ABIArgInfo classifyArgumentType(QualType RetTy,
431                                          ASTContext &Context) const;
432};
433}
434
void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             Class &Lo, Class &Hi) const {
  // Default: any type not handled below is conservatively classified
  // as Memory.
  Lo = Memory;
  Hi = NoClass;
  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Lo = NoClass;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Lo = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Lo = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }

    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    // FIXME: __int128 is (Integer, Integer).
  } else if (Ty->isPointerLikeType() || Ty->isBlockPointerType() ||
             Ty->isObjCQualifiedInterfaceType()) {
    Lo = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    unsigned Size = Context.getTypeSize(VT);
    if (Size == 64) {
      // FIXME: For some reason, gcc appears to be treating <1 x
      // double> as INTEGER; this seems wrong, but we will match for
      // now (icc rejects <1 x double>, so...).
      Lo = (VT->getElementType() == Context.DoubleTy) ? Integer : SSE;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = CT->getElementType();

    if (ET->isIntegerType()) {
      unsigned Size = Context.getTypeSize(Ty);
      if (Size <= 64)
        Lo = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Lo = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Lo = ComplexX87;
  } else if (const RecordType *RT = Ty->getAsRecordType()) {
    unsigned Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Lo = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
           e = RD->field_end(); i != e; ++i, ++idx) {
      unsigned Offset = Layout.getFieldOffset(idx);

      //  AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      //  fields, it has class MEMORY.
      if (Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Determine which half of the structure we are classifying.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class &Target = Offset < 64 ? Lo : Hi;

      // Classify this field.
      Class FieldLo, FieldHi;
      classify(i->getType(), Context, FieldLo, FieldHi);

      // Merge the lo field classification.
      //
      // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
      // classified recursively so that always two fields are
      // considered. The resulting class is calculated according to
      // the classes of the fields in the eightbyte:
      //
      // (a) If both classes are equal, this is the resulting class.
      //
      // (b) If one of the classes is NO_CLASS, the resulting class is
      // the other class.
      //
      // (c) If one of the classes is MEMORY, the result is the MEMORY
      // class.
      //
      // (d) If one of the classes is INTEGER, the result is the
      // INTEGER.
      //
      // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
      // MEMORY is used as class.
      //
      // (f) Otherwise class SSE is used.
      if (Target == FieldLo || FieldLo == NoClass) ;
      else if (Target == NoClass)
        Target = FieldLo;
      else if (FieldLo == Memory) {
        // Memory is never over-ridden, just bail.
        Lo = Memory;
        return;
      }
      else if (Target == Integer || FieldLo == Integer)
        Target = Integer;
      else if (FieldLo == X87 || FieldLo == X87Up || FieldLo == ComplexX87) {
        // As before, just bail once we generate a memory class.
        Lo = Memory;
        return;
      } else
        Target = SSE;

      // It isn't clear from the ABI spec what the role of the high
      // classification is here, but since this should only happen
      // when we have a struct with a two eightbyte member, we can
      // just push the field high class into the overall high class.
      if (FieldHi != NoClass)
        Hi = FieldHi;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail). I don't believe the second is actually
    // possible at all.
    assert(Lo != Memory && "Unexpected memory classification.");
    if (Hi == SSEUp && Lo != SSE)
        Hi = SSE;
  }
}
590
/// classifyReturnType - Map the (Lo, Hi) classification of \arg RetTy
/// onto a concrete return convention: ignore, sret, or coercion to an
/// LLVM type covering the used registers.
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, Lo, Hi);

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument, i.e. structret.
  case Memory:
    return ABIArgInfo::getStructRet();

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::Int64Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::DoubleTy; break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::X86_FP80Ty; break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == NoClass && "Unexpected ComplexX87 classification.");
    ResType = llvm::VectorType::get(llvm::Type::X86_FP80Ty, 2);
    break;
  }

  switch (Hi) {
    // Memory was handled previously, and ComplexX87 and X87 should
    // never occur as hi classes.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
    //
    // X87UP should always be preceded by X87, so we don't need to do
    // anything here.
  case X87Up:
    assert(Lo == X87 && "Unexpected X87Up classification.");
    break;
  }

  return ABIArgInfo::getCoerce(ResType);
}
673
// Argument classification for x86-64 is not implemented yet; all
// arguments currently use the default conversion.
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}
678
// The default ABI performs no special return classification.
ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}
683
// The default ABI performs no special argument classification.
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}
688
/// getABIInfo - Lazily construct and cache the ABIInfo for the
/// current target. The cached object is intentionally never freed.
const ABIInfo &CodeGenTypes::getABIInfo() const {
  if (TheABIInfo)
    return *TheABIInfo;

  // For now we just cache this in the CodeGenTypes and don't bother
  // to free it.
  const char *TargetPrefix = getContext().Target.getTargetPrefix();
  if (strcmp(TargetPrefix, "x86") == 0) {
    switch (getContext().Target.getPointerWidth(0)) {
    case 32:
      return *(TheABIInfo = new X86_32ABIInfo());
    case 64:
      // When the experimental flag is off this deliberately falls out
      // of the switch so x86-64 uses the default ABI.
      if (UseX86_64ABI)
        return *(TheABIInfo = new X86_64ABIInfo());
    }
  }

  return *(TheABIInfo = new DefaultABIInfo);
}
708
709// getABIReturnInfo - Wrap the ABIInfo getABIReturnInfo, altering
710// "default" types to StructRet when appropriate for simplicity.
711static ABIArgInfo getABIReturnInfo(QualType Ty, CodeGenTypes &CGT) {
712  assert(!Ty->isArrayType() &&
713         "Array types cannot be passed directly.");
714  ABIArgInfo Info = CGT.getABIInfo().classifyReturnType(Ty, CGT.getContext());
715  // Ensure default on aggregate types is StructRet.
716  if (Info.isDefault() && CodeGenFunction::hasAggregateLLVMType(Ty))
717    return ABIArgInfo::getStructRet();
718  return Info;
719}
720
721// getABIArgumentInfo - Wrap the ABIInfo getABIReturnInfo, altering
722// "default" types to ByVal when appropriate for simplicity.
723static ABIArgInfo getABIArgumentInfo(QualType Ty, CodeGenTypes &CGT) {
724  assert(!Ty->isArrayType() &&
725         "Array types cannot be passed directly.");
726  ABIArgInfo Info = CGT.getABIInfo().classifyArgumentType(Ty, CGT.getContext());
727  // Ensure default on aggregate types is ByVal.
728  if (Info.isDefault() && CodeGenFunction::hasAggregateLLVMType(Ty))
729    return ABIArgInfo::getByVal(0);
730  return Info;
731}
732
733/***/
734
/// GetExpandedTypes - Append the LLVM types of \arg Ty's scalar
/// fields (recursing into nested aggregates) to \arg ArgTys. Only
/// valid for expandable structure types (no flexible arrays, no
/// bit-fields).
void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Nested aggregates are flattened recursively.
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}
757
758llvm::Function::arg_iterator
759CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
760                                    llvm::Function::arg_iterator AI) {
761  const RecordType *RT = Ty->getAsStructureType();
762  assert(RT && "Can only expand structure types.");
763
764  RecordDecl *RD = RT->getDecl();
765  assert(LV.isSimple() &&
766         "Unexpected non-simple lvalue during struct expansion.");
767  llvm::Value *Addr = LV.getAddress();
768  for (RecordDecl::field_iterator i = RD->field_begin(),
769         e = RD->field_end(); i != e; ++i) {
770    FieldDecl *FD = *i;
771    QualType FT = FD->getType();
772
773    // FIXME: What are the right qualifiers here?
774    LValue LV = EmitLValueForField(Addr, FD, false, 0);
775    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
776      AI = ExpandTypeFromArgs(FT, LV, AI);
777    } else {
778      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
779      ++AI;
780    }
781  }
782
783  return AI;
784}
785
786void
787CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
788                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
789  const RecordType *RT = Ty->getAsStructureType();
790  assert(RT && "Can only expand structure types.");
791
792  RecordDecl *RD = RT->getDecl();
793  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
794  llvm::Value *Addr = RV.getAggregateAddr();
795  for (RecordDecl::field_iterator i = RD->field_begin(),
796         e = RD->field_end(); i != e; ++i) {
797    FieldDecl *FD = *i;
798    QualType FT = FD->getType();
799
800    // FIXME: What are the right qualifiers here?
801    LValue LV = EmitLValueForField(Addr, FD, false, 0);
802    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
803      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
804    } else {
805      RValue RV = EmitLoadOfLValue(LV, FT);
806      assert(RV.isScalar() &&
807             "Unexpected non-scalar rvalue during struct expansion.");
808      Args.push_back(RV.getScalarVal());
809    }
810  }
811}
812
813/***/
814
815const llvm::FunctionType *
816CodeGenTypes::GetFunctionType(const CGCallInfo &CI, bool IsVariadic) {
817  return GetFunctionType(CI.argtypes_begin(), CI.argtypes_end(), IsVariadic);
818}
819
820const llvm::FunctionType *
821CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
822  return GetFunctionType(FI.argtypes_begin(), FI.argtypes_end(), FI.isVariadic());
823}
824
/// GetFunctionType - Construct the LLVM function type for a function
/// whose clang-level return type is *begin and whose parameter types
/// are the remaining entries of [begin, end), lowering each according
/// to its ABI classification.
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(ArgTypeIterator begin, ArgTypeIterator end,
                              bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  // The first entry in the type sequence is the return type.
  QualType RetTy = *begin;
  ABIArgInfo RetAI = getABIReturnInfo(RetTy, *this);
  switch (RetAI.getKind()) {
  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    // Not legal for return values; note that with asserts disabled
    // control falls through into the Default case below.
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Default:
    if (RetTy->isVoidType()) {
      ResultType = llvm::Type::VoidTy;
    } else {
      ResultType = ConvertType(RetTy);
    }
    break;

  case ABIArgInfo::StructRet: {
    // Struct returns lower to a void function taking a pointer to the
    // result storage as an implicit first argument.
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    // An ignored result is simply dropped; the function returns void.
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    // The value is returned as the ABI-mandated coercion type.
    ResultType = RetAI.getCoerceToType();
    break;
  }

  // Lower each parameter. One clang type may map to zero (Ignore), one
  // (Default/ByVal), or several (Expand) LLVM argument types.
  for (++begin; begin != end; ++begin) {
    ABIArgInfo AI = getABIArgumentInfo(*begin, *this);
    const llvm::Type *Ty = ConvertType(*begin);

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
    case ABIArgInfo::StructRet:
      // Not legal for non-return arguments; with asserts disabled
      // control falls through into the ByVal case below.
      assert(0 && "Invalid ABI kind for non-return argument");

    case ABIArgInfo::ByVal:
      // byval arguments are always on the stack, which is addr space #0.
      ArgTys.push_back(llvm::PointerType::getUnqual(Ty));
      assert(AI.getByValAlignment() == 0 && "FIXME: alignment unhandled");
      break;

    case ABIArgInfo::Default:
      ArgTys.push_back(Ty);
      break;

    case ABIArgInfo::Expand:
      // Flatten the structure into its scalar members.
      GetExpandedTypes(*begin, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
893
894bool CodeGenModule::ReturnTypeUsesSret(QualType RetTy) {
895  return getABIReturnInfo(RetTy, getTypes()).isStructRet();
896}
897
/// ConstructAttributeList - Compute the LLVM attribute list (return,
/// parameter, and function attributes) for a function whose return type
/// is *begin and whose parameter types follow in [begin, end).
/// Attribute index 0 names the return value, 1..N name the LLVM
/// arguments, and ~0 names the function itself.
void CodeGenModule::ConstructAttributeList(const Decl *TargetDecl,
                                           ArgTypeIterator begin,
                                           ArgTypeIterator end,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  // Translate source-level attributes on the declaration, if we have
  // one, into LLVM function attributes.
  if (TargetDecl) {
    if (TargetDecl->getAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->getAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->getAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->getAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
  }

  QualType RetTy = *begin;
  // Index tracks the 1-based LLVM argument number of the next parameter.
  unsigned Index = 1;
  ABIArgInfo RetAI = getABIReturnInfo(RetTy, getTypes());
  switch (RetAI.getKind()) {
  case ABIArgInfo::Default:
    // Promotable integer returns carry an extension attribute telling
    // the caller how the value was widened.
    if (RetTy->isPromotableIntegerType()) {
      if (RetTy->isSignedIntegerType()) {
        RetAttrs |= llvm::Attribute::SExt;
      } else if (RetTy->isUnsignedIntegerType()) {
        RetAttrs |= llvm::Attribute::ZExt;
      }
    }
    break;

  case ABIArgInfo::StructRet:
    // The sret pointer occupies the implicit first argument slot.
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                  llvm::Attribute::StructRet|
                                                  llvm::Attribute::NoAlias));
    ++Index;
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
  // Remaining entries of [begin, end) are the parameter types.
  for (++begin; begin != end; ++begin) {
    QualType ParamType = *begin;
    unsigned Attributes = 0;
    ABIArgInfo AI = getABIArgumentInfo(ParamType, getTypes());

    switch (AI.getKind()) {
    case ABIArgInfo::StructRet:
    case ABIArgInfo::Coerce:
      // Not legal for non-return arguments; with asserts disabled
      // control falls through into the ByVal case below.
      assert(0 && "Invalid ABI kind for non-return argument");

    case ABIArgInfo::ByVal:
      Attributes |= llvm::Attribute::ByVal;
      assert(AI.getByValAlignment() == 0 && "FIXME: alignment unhandled");
      break;

    case ABIArgInfo::Default:
      // Same extension rule as for promotable integer returns above.
      if (ParamType->isPromotableIntegerType()) {
        if (ParamType->isSignedIntegerType()) {
          Attributes |= llvm::Attribute::SExt;
        } else if (ParamType->isUnsignedIntegerType()) {
          Attributes |= llvm::Attribute::ZExt;
        }
      }
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need
      // to do anything here? The result should be just reconstructed
      // on the other side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      // Skip past every LLVM argument the expansion produced.
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));

}
996
/// EmitFunctionProlog - Emit the target-independent function prologue:
/// name the incoming LLVM arguments and bind each source-level parameter
/// in \arg Args to a value, according to its ABI classification.
void CodeGenFunction::EmitFunctionProlog(llvm::Function *Fn,
                                         QualType RetTy,
                                         const FunctionArgList &Args) {
  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(RetTy)) {
    AI->setName("agg.result");
    ++AI;
  }

  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *Arg = i->first;
    QualType Ty = i->second;
    ABIArgInfo ArgI = getABIArgumentInfo(Ty, CGM.getTypes());

    switch (ArgI.getKind()) {
    case ABIArgInfo::ByVal:
    case ABIArgInfo::Default: {
      // Exactly one LLVM argument corresponds to this parameter.
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
        // This must be a promotion, for something like
        // "void a(x) short x; {..."
        V = EmitScalarConversion(V, Ty, Arg->getType());
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this was structure was expand into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertType(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // NOTE(review): GetFunctionType pushes no LLVM argument type for
      // an Ignore'd parameter, yet breaking out of this switch runs the
      // shared ++AI below, which would advance past an unrelated
      // argument (and no EmitParmDecl is issued). Confirm whether this
      // case should 'continue' instead.
      break;

    case ABIArgInfo::Coerce:
    case ABIArgInfo::StructRet:
      assert(0 && "Invalid ABI kind for non-return argument");
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
1060
1061/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1062/// a pointer to an object of type \arg Ty.
1063///
1064/// This safely handles the case when the src type is smaller than the
1065/// destination type; in this situation the values of bits which not
1066/// present in the src are undefined.
1067static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
1068                                      const llvm::Type *Ty,
1069                                      CodeGenFunction &CGF) {
1070  const llvm::Type *SrcTy =
1071    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
1072  unsigned SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
1073  unsigned DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty);
1074
1075  // If load is legal, just bitcase the src pointer.
1076  if (SrcSize == DstSize) {
1077    llvm::Value *Casted =
1078      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
1079    return CGF.Builder.CreateLoad(Casted);
1080  } else {
1081    assert(SrcSize < DstSize && "Coercion is losing source bits!");
1082
1083    // Otherwise do coercion through memory. This is stupid, but
1084    // simple.
1085    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
1086    llvm::Value *Casted =
1087      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
1088    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
1089    return CGF.Builder.CreateLoad(Tmp);
1090  }
1091}
1092
1093/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1094/// where the source and destination may have different types.
1095///
1096/// This safely handles the case when the src type is larger than the
1097/// destination type; the upper bits of the src will be lost.
1098static void CreateCoercedStore(llvm::Value *Src,
1099                               llvm::Value *DstPtr,
1100                               CodeGenFunction &CGF) {
1101  const llvm::Type *SrcTy = Src->getType();
1102  const llvm::Type *DstTy =
1103    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
1104
1105  unsigned SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
1106  unsigned DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy);
1107
1108  // If store is legal, just bitcase the src pointer.
1109  if (SrcSize == DstSize) {
1110    llvm::Value *Casted =
1111      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
1112    CGF.Builder.CreateStore(Src, Casted);
1113  } else {
1114    assert(SrcSize > DstSize && "Coercion is missing bits!");
1115
1116    // Otherwise do coercion through memory. This is stupid, but
1117    // simple.
1118    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
1119    CGF.Builder.CreateStore(Src, Tmp);
1120    llvm::Value *Casted =
1121      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
1122    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(Casted), DstPtr);
1123  }
1124}
1125
/// EmitFunctionEpilog - Emit the return sequence: fetch the computed
/// return value from the \arg ReturnValue slot (if any) and return it in
/// the form required by the ABI classification of \arg RetTy.
void CodeGenFunction::EmitFunctionEpilog(QualType RetTy,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    ABIArgInfo RetAI = getABIReturnInfo(RetTy, CGM.getTypes());

    switch (RetAI.getKind()) {
    case ABIArgInfo::StructRet:
      // Copy the result into the sret slot, which is the function's
      // implicit first argument.
      if (RetTy->isAnyComplexType()) {
        // FIXME: Volatile
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        Builder.CreateStore(Builder.CreateLoad(ReturnValue),
                            CurFn->arg_begin());
      }
      break;

    case ABIArgInfo::Default:
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // Reload the value as the coercion type the ABI mandates.
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;
    }

    case ABIArgInfo::ByVal:
    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  // RV is null for void-returning forms (no value, sret, or Ignore).
  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}
1172
1173RValue CodeGenFunction::EmitCall(llvm::Value *Callee,
1174                                 QualType RetTy,
1175                                 const CallArgList &CallArgs) {
1176  llvm::SmallVector<llvm::Value*, 16> Args;
1177
1178  // Handle struct-return functions by passing a pointer to the
1179  // location that we would like to return into.
1180  ABIArgInfo RetAI = getABIReturnInfo(RetTy, CGM.getTypes());
1181  switch (RetAI.getKind()) {
1182  case ABIArgInfo::StructRet:
1183    // Create a temporary alloca to hold the result of the call. :(
1184    Args.push_back(CreateTempAlloca(ConvertType(RetTy)));
1185    break;
1186
1187  case ABIArgInfo::Default:
1188  case ABIArgInfo::Ignore:
1189  case ABIArgInfo::Coerce:
1190    break;
1191
1192  case ABIArgInfo::ByVal:
1193  case ABIArgInfo::Expand:
1194    assert(0 && "Invalid ABI kind for return argument");
1195  }
1196
1197  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1198       I != E; ++I) {
1199    ABIArgInfo ArgInfo = getABIArgumentInfo(I->second, CGM.getTypes());
1200    RValue RV = I->first;
1201
1202    switch (ArgInfo.getKind()) {
1203    case ABIArgInfo::ByVal: // Default is byval
1204    case ABIArgInfo::Default:
1205      if (RV.isScalar()) {
1206        Args.push_back(RV.getScalarVal());
1207      } else if (RV.isComplex()) {
1208        // Make a temporary alloca to pass the argument.
1209        Args.push_back(CreateTempAlloca(ConvertType(I->second)));
1210        StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1211      } else {
1212        Args.push_back(RV.getAggregateAddr());
1213      }
1214      break;
1215
1216    case ABIArgInfo::Ignore:
1217      break;
1218
1219    case ABIArgInfo::StructRet:
1220    case ABIArgInfo::Coerce:
1221      assert(0 && "Invalid ABI kind for non-return argument");
1222      break;
1223
1224    case ABIArgInfo::Expand:
1225      ExpandTypeToArgs(I->second, RV, Args);
1226      break;
1227    }
1228  }
1229
1230  llvm::CallInst *CI = Builder.CreateCall(Callee,&Args[0],&Args[0]+Args.size());
1231  CGCallInfo CallInfo(RetTy, CallArgs);
1232
1233  // FIXME: Provide TargetDecl so nounwind, noreturn, etc, etc get set.
1234  CodeGen::AttributeListType AttributeList;
1235  CGM.ConstructAttributeList(0,
1236                             CallInfo.argtypes_begin(), CallInfo.argtypes_end(),
1237                             AttributeList);
1238  CI->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
1239                                         AttributeList.size()));
1240
1241  if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
1242    CI->setCallingConv(F->getCallingConv());
1243  if (CI->getType() != llvm::Type::VoidTy)
1244    CI->setName("call");
1245
1246  switch (RetAI.getKind()) {
1247  case ABIArgInfo::StructRet:
1248    if (RetTy->isAnyComplexType())
1249      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
1250    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1251      return RValue::getAggregate(Args[0]);
1252    else
1253      return RValue::get(Builder.CreateLoad(Args[0]));
1254
1255  case ABIArgInfo::Default:
1256    return RValue::get(RetTy->isVoidType() ? 0 : CI);
1257
1258  case ABIArgInfo::Ignore:
1259    return RValue::get(0);
1260
1261  case ABIArgInfo::Coerce: {
1262    llvm::Value *V = CreateTempAlloca(ConvertType(RetTy), "coerce");
1263    CreateCoercedStore(CI, V, *this);
1264    if (RetTy->isAnyComplexType())
1265      return RValue::getComplex(LoadComplexFromAddr(V, false));
1266    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1267      return RValue::getAggregate(V);
1268    else
1269      return RValue::get(Builder.CreateLoad(V));
1270  }
1271
1272  case ABIArgInfo::ByVal:
1273  case ABIArgInfo::Expand:
1274    assert(0 && "Invalid ABI kind for return argument");
1275  }
1276
1277  assert(0 && "Unhandled ABIArgInfo::Kind");
1278  return RValue::get(0);
1279}
1280