CGCall.cpp revision 9a82b52ae83fa1c09266b2fa5f0375392f7d127f
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

CGFunctionInfo::CGFunctionInfo(const FunctionTypeNoProto *FTNP)
  : IsVariadic(true)
{
  ArgTypes.push_back(FTNP->getResultType());
}

CGFunctionInfo::CGFunctionInfo(const FunctionTypeProto *FTP)
  : IsVariadic(FTP->isVariadic())
{
  ArgTypes.push_back(FTP->getResultType());
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTypes.push_back(FTP->getArgType(i));
}

// FIXME: Is there really any reason to have this still?
CGFunctionInfo::CGFunctionInfo(const FunctionDecl *FD)
{
  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  const FunctionTypeProto *FTP = dyn_cast<FunctionTypeProto>(FTy);

  ArgTypes.push_back(FTy->getResultType());
  if (FTP) {
    IsVariadic = FTP->isVariadic();
    for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
      ArgTypes.push_back(FTP->getArgType(i));
  } else {
    IsVariadic = true;
  }
}

CGFunctionInfo::CGFunctionInfo(const ObjCMethodDecl *MD,
                               const ASTContext &Context)
  : IsVariadic(MD->isVariadic())
{
  ArgTypes.push_back(MD->getResultType());
  ArgTypes.push_back(MD->getSelfDecl()->getType());
  ArgTypes.push_back(Context.getObjCSelType());
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTypes.push_back((*i)->getType());
}

CGFunctionInfo::CGFunctionInfo(QualType ResTy, const CallArgList &Args,
                               bool _IsVariadic)
  : IsVariadic(_IsVariadic)
{
  ArgTypes.push_back(ResTy);
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTypes.push_back(i->second);
}

ArgTypeIterator CGFunctionInfo::argtypes_begin() const {
  return ArgTypes.begin();
}

ArgTypeIterator CGFunctionInfo::argtypes_end() const {
  return ArgTypes.end();
}

/***/

/// ABIArgInfo - Helper class to encapsulate information about how a
/// specific C type should be passed to or returned from a function.
class ABIArgInfo {
public:
  enum Kind {
    Default,
    StructRet, /// Only valid for return values. The return value
               /// should be passed through a pointer to a caller
               /// allocated location passed as an implicit first
               /// argument to the function.

    Ignore,    /// Ignore the argument (treat as void). Useful for
               /// void and empty structs.

    Coerce,    /// Only valid for aggregate return types, the argument
               /// should be accessed by coercion to a provided type.

    ByVal,     /// Only valid for aggregate argument types. The
               /// structure should be passed "byval" with the
               /// specified alignment (0 indicates default
               /// alignment).

    Expand,    /// Only valid for aggregate argument types. The
               /// structure should be expanded into consecutive
               /// arguments for its constituent fields. Currently
               /// expand is only allowed on structures whose fields
               /// are all scalar types or are themselves expandable
               /// types.

    KindFirst=Default, KindLast=Expand
  };

private:
  Kind TheKind;
  const llvm::Type *TypeData;
  unsigned UIntData;

  ABIArgInfo(Kind K, const llvm::Type *TD=0,
             unsigned UI=0) : TheKind(K),
                              TypeData(TD),
                              UIntData(UI) {}
public:
  static ABIArgInfo getDefault() {
    return ABIArgInfo(Default);
  }
  static ABIArgInfo getStructRet() {
    return ABIArgInfo(StructRet);
  }
  static ABIArgInfo getIgnore() {
    return ABIArgInfo(Ignore);
  }
  static ABIArgInfo getCoerce(const llvm::Type *T) {
    return ABIArgInfo(Coerce, T);
  }
  static ABIArgInfo getByVal(unsigned Alignment) {
    return ABIArgInfo(ByVal, 0, Alignment);
  }
  static ABIArgInfo getExpand() {
    return ABIArgInfo(Expand);
  }

  Kind getKind() const { return TheKind; }
  bool isDefault() const { return TheKind == Default; }
  bool isStructRet() const { return TheKind == StructRet; }
  bool isIgnore() const { return TheKind == Ignore; }
  bool isCoerce() const { return TheKind == Coerce; }
  bool isByVal() const { return TheKind == ByVal; }
  bool isExpand() const { return TheKind == Expand; }

  // Coerce accessors
  const llvm::Type *getCoerceToType() const {
    assert(TheKind == Coerce && "Invalid kind!");
    return TypeData;
  }

  // ByVal accessors
  unsigned getByValAlignment() const {
    assert(TheKind == ByVal && "Invalid kind!");
    return UIntData;
  }
};

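// Illustrative sketch (editorial, not part of the original code): a
// hypothetical target hook built on ABIArgInfo might look roughly like
//
//   ABIArgInfo classifyArgumentType(QualType Ty, ASTContext &C) const {
//     if (CodeGenFunction::hasAggregateLLVMType(Ty))
//       return ABIArgInfo::getByVal(0); // aggregates go on the stack
//     return ABIArgInfo::getDefault();  // scalars use the default rules
//   }
//
// which matches the effective default behavior established by the
// getABIReturnInfo/getABIArgumentInfo wrappers further below.
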
/***/

/* FIXME: All of this stuff should be part of the target interface
   somehow. It is currently here because it is not clear how to factor
   the targets to support this, since the Targets currently live in a
   layer below types n'stuff.
 */

/// ABIInfo - Target specific hooks for defining how a type should be
/// passed or returned from functions.
class clang::ABIInfo {
public:
  virtual ~ABIInfo();

  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const = 0;

  virtual ABIArgInfo classifyArgumentType(QualType Ty,
                                          ASTContext &Context) const = 0;
};

ABIInfo::~ABIInfo() {}

/// isEmptyStruct - Return true iff a structure has no non-empty
/// members. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyStruct(QualType T) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    if (!isEmptyStruct(FD->getType()))
      return false;
  }
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const FieldDecl *isSingleElementStruct(QualType T) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const FieldDecl *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    if (isEmptyStruct(FT)) {
      // Ignore
    } else if (Found) {
      return 0;
    } else if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FD;
    } else {
      Found = isSingleElementStruct(FT);
      if (!Found)
        return 0;
    }
  }

  return Found;
}

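// Illustrative example: given
//   struct Empty { };
//   struct S { struct Empty e; float f; };
// isSingleElementStruct(S) ignores the empty member and returns the
// FieldDecl for 'f'; adding a second non-empty field would make it
// return 0.
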
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
                                           ASTContext &Context) {
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // If this is a bit-field we need to make sure it is still a
    // 32-bit or 64-bit type. Note that the check below only rejects
    // bit-fields of 16 bits or fewer.
    if (Expr *BW = FD->getBitWidth()) {
      unsigned Width = BW->getIntegerConstantExprValue(Context).getZExtValue();
      if (Width <= 16)
        return false;
    }
  }
  return true;
}

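// Illustrative example: under the predicate above,
//   struct { int a; double d; void *p; }  qualifies (32- and 64-bit fields),
//   struct { short s; }                   does not (16-bit field).
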
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// sensible LLVM IR generation, but does not conform to any
/// particular ABI.
class DefaultABIInfo : public ABIInfo {
  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const;

  virtual ABIArgInfo classifyArgumentType(QualType Ty,
                                          ASTContext &Context) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
public:
  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const;

  virtual ABIArgInfo classifyArgumentType(QualType Ty,
                                          ASTContext &Context) const;
};
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Classify "single element" structs as their element type.
    const FieldDecl *SeltFD = isSingleElementStruct(RetTy);
    if (SeltFD) {
      QualType SeltTy = SeltFD->getType()->getDesugaredType();
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        // FIXME: This is gross, it would be nice if we could just
        // pass back SeltTy and have clients deal with it. Is it worth
        // supporting coerce to both LLVM and clang Types?
        if (BT->isIntegerType()) {
          uint64_t Size = Context.getTypeSize(SeltTy);
          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
        } else if (BT->getKind() == BuiltinType::Double) {
          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as
        // the proper pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
        return ABIArgInfo::getCoerce(PtrTy);
      }
    }

    uint64_t Size = Context.getTypeSize(RetTy);
    if (Size == 8) {
      return ABIArgInfo::getCoerce(llvm::Type::Int8Ty);
    } else if (Size == 16) {
      return ABIArgInfo::getCoerce(llvm::Type::Int16Ty);
    } else if (Size == 32) {
      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
    } else if (Size == 64) {
      return ABIArgInfo::getCoerce(llvm::Type::Int64Ty);
    } else {
      return ABIArgInfo::getStructRet();
    }
  } else {
    return ABIArgInfo::getDefault();
  }
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always byval.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getByVal(0);

    // Expand empty structs (i.e. ignore)
    uint64_t Size = Context.getTypeSize(Ty);
    if (Ty->isStructureType() && Size == 0)
      return ABIArgInfo::getExpand();

    // Expand structs with size <= 128-bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Size <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    return ABIArgInfo::getByVal(0);
  } else {
    return ABIArgInfo::getDefault();
  }
}

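// Illustrative example: on x86-32, 'struct { int x; float y; }' is 64
// bits and contains only basic types, so classifyArgumentType returns
// Expand and the struct is passed as two separate arguments (i32,
// float) rather than byval.
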
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object.  Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// be NoClass.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

public:
  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const;

  virtual ABIArgInfo classifyArgumentType(QualType Ty,
                                          ASTContext &Context) const;
};
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
                                          Class Field) const {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.
  assert((Accum == NoClass || Accum == Integer ||
          Accum == SSE || Accum == SSEUp) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  else if (Field == Memory)
    return Memory;
  else if (Accum == NoClass)
    return Field;
  else if (Accum == Integer || Field == Integer)
    return Integer;
  else if (Field == X87 || Field == X87Up || Field == ComplexX87)
    return Memory;
  else
    return SSE;
}

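// Illustrative example: classifying the single eightbyte of
// 'struct { int a; float b; }' merges the field classes as
//   merge(NoClass, Integer) == Integer   // field 'a'
//   merge(Integer, SSE)     == Integer   // field 'b', rule (d)
// so the eightbyte is passed in an integer register.
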
void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value
  // class for Class pairs with appropriate constructor methods for
  // the various situations.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    // FIXME: __int128 is (Integer, Integer).
  } else if (Ty->isPointerLikeType() || Ty->isBlockPointerType() ||
             Ty->isObjCQualifiedInterfaceType()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 64) {
      // gcc passes <1 x double> in memory.
      if (VT->getElementType() == Context.DoubleTy)
        return;

      Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = CT->getElementType();

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegerType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAsRecordType()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
           e = RD->field_end(); i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      if (Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;
      classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}

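// Illustrative examples of classifications produced by classify():
//   int                     -> (Integer, NoClass)
//   long double             -> (X87, X87Up)
//   _Complex float          -> (SSE, NoClass)      // fits in one eightbyte
//   struct { int a[4]; }    -> (Integer, Integer)  // two eightbytes
//   struct { double d[3]; } -> (Memory, NoClass)   // larger than two eightbytes
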
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument, i.e. structret.
  case Memory:
    return ABIArgInfo::getStructRet();

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::Int64Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::DoubleTy; break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::X86_FP80Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == NoClass && "Unexpected ComplexX87 classification.");
    ResType = llvm::VectorType::get(llvm::Type::X86_FP80Ty, 2);
    break;
  }

  switch (Hi) {
    // Memory was handled previously, and ComplexX87 and X87 should
    // never occur as hi classes.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
    //
    // X87UP should always be preceded by X87, so we don't need to do
    // anything here.
  case X87Up:
    assert(Lo == X87 && "Unexpected X87Up classification.");
    break;
  }

  return ABIArgInfo::getCoerce(ResType);
}

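// Illustrative example: 'struct { int a; int b; }' classifies as
// (Integer, NoClass), so the value is returned by coercion to i64,
// i.e. in %rax.
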
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}

const ABIInfo &CodeGenTypes::getABIInfo() const {
  if (TheABIInfo)
    return *TheABIInfo;

  // For now we just cache this in the CodeGenTypes and don't bother
  // to free it.
  const char *TargetPrefix = getContext().Target.getTargetPrefix();
  if (strcmp(TargetPrefix, "x86") == 0) {
    switch (getContext().Target.getPointerWidth(0)) {
    case 32:
      return *(TheABIInfo = new X86_32ABIInfo());
    case 64:
      return *(TheABIInfo = new X86_64ABIInfo());
    }
  }

  return *(TheABIInfo = new DefaultABIInfo);
}

// getABIReturnInfo - Wrap ABIInfo::classifyReturnType, altering
// "Default" results on aggregate types to StructRet for simplicity.
static ABIArgInfo getABIReturnInfo(QualType Ty, CodeGenTypes &CGT) {
  assert(!Ty->isArrayType() &&
         "Array types cannot be passed directly.");
  ABIArgInfo Info = CGT.getABIInfo().classifyReturnType(Ty, CGT.getContext());
  // Ensure default on aggregate types is StructRet.
  if (Info.isDefault() && CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getStructRet();
  return Info;
}

// getABIArgumentInfo - Wrap ABIInfo::classifyArgumentType, altering
// "Default" results on aggregate types to ByVal for simplicity.
static ABIArgInfo getABIArgumentInfo(QualType Ty, CodeGenTypes &CGT) {
  assert(!Ty->isArrayType() &&
         "Array types cannot be passed directly.");
  ABIArgInfo Info = CGT.getABIInfo().classifyArgumentType(Ty, CGT.getContext());
  // Ensure default on aggregate types is ByVal.
  if (Info.isDefault() && CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getByVal(0);
  return Info;
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

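// Illustrative example: for
//   struct S { int a; struct { float f; double d; } inner; };
// GetExpandedTypes recurses into 'inner' and appends i32, float and
// double to ArgTys.
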
823CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
824                                    llvm::Function::arg_iterator AI) {
825  const RecordType *RT = Ty->getAsStructureType();
826  assert(RT && "Can only expand structure types.");
827
828  RecordDecl *RD = RT->getDecl();
829  assert(LV.isSimple() &&
830         "Unexpected non-simple lvalue during struct expansion.");
831  llvm::Value *Addr = LV.getAddress();
832  for (RecordDecl::field_iterator i = RD->field_begin(),
833         e = RD->field_end(); i != e; ++i) {
834    FieldDecl *FD = *i;
835    QualType FT = FD->getType();
836
837    // FIXME: What are the right qualifiers here?
838    LValue LV = EmitLValueForField(Addr, FD, false, 0);
839    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
840      AI = ExpandTypeFromArgs(FT, LV, AI);
841    } else {
842      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
843      ++AI;
844    }
845  }
846
847  return AI;
848}
849
850void
851CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
852                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
853  const RecordType *RT = Ty->getAsStructureType();
854  assert(RT && "Can only expand structure types.");
855
856  RecordDecl *RD = RT->getDecl();
857  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
858  llvm::Value *Addr = RV.getAggregateAddr();
859  for (RecordDecl::field_iterator i = RD->field_begin(),
860         e = RD->field_end(); i != e; ++i) {
861    FieldDecl *FD = *i;
862    QualType FT = FD->getType();
863
864    // FIXME: What are the right qualifiers here?
865    LValue LV = EmitLValueForField(Addr, FD, false, 0);
866    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
867      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
868    } else {
869      RValue RV = EmitLoadOfLValue(LV, FT);
870      assert(RV.isScalar() &&
871             "Unexpected non-scalar rvalue during struct expansion.");
872      Args.push_back(RV.getScalarVal());
873    }
874  }
875}
876
877/***/
878
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  ArgTypeIterator begin = FI.argtypes_begin(), end = FI.argtypes_end();
  QualType RetTy = *begin;
  ABIArgInfo RetAI = getABIReturnInfo(RetTy, *this);
  switch (RetAI.getKind()) {
  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Default:
    if (RetTy->isVoidType()) {
      ResultType = llvm::Type::VoidTy;
    } else {
      ResultType = ConvertType(RetTy);
    }
    break;

  case ABIArgInfo::StructRet: {
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (++begin; begin != end; ++begin) {
    ABIArgInfo AI = getABIArgumentInfo(*begin, *this);
    const llvm::Type *Ty = ConvertType(*begin);

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
    case ABIArgInfo::StructRet:
      assert(0 && "Invalid ABI kind for non-return argument");

    case ABIArgInfo::ByVal:
      // byval arguments are always on the stack, which is addr space #0.
      ArgTys.push_back(llvm::PointerType::getUnqual(Ty));
      assert(AI.getByValAlignment() == 0 && "FIXME: alignment unhandled");
      break;

    case ABIArgInfo::Default:
      ArgTys.push_back(Ty);
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(*begin, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, FI.isVariadic());
}

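// Illustrative example: under the default ABI a C function
//   struct S f(struct S s, int i);
// lowers to roughly the LLVM type
//   void (%struct.S*, %struct.S*, i32)
// with the sret result pointer prepended as the first parameter and
// the aggregate argument passed byval.
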
bool CodeGenModule::ReturnTypeUsesSret(QualType RetTy) {
  return getABIReturnInfo(RetTy, getTypes()).isStructRet();
}

void CodeGenModule::ConstructAttributeList(const Decl *TargetDecl,
                                           const CGFunctionInfo &Info,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  if (TargetDecl) {
    if (TargetDecl->getAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->getAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->getAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->getAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
  }

  ArgTypeIterator begin = Info.argtypes_begin(), end = Info.argtypes_end();
  QualType RetTy = *begin;
  unsigned Index = 1;
  ABIArgInfo RetAI = getABIReturnInfo(RetTy, getTypes());
  switch (RetAI.getKind()) {
  case ABIArgInfo::Default:
    if (RetTy->isPromotableIntegerType()) {
      if (RetTy->isSignedIntegerType()) {
        RetAttrs |= llvm::Attribute::SExt;
      } else if (RetTy->isUnsignedIntegerType()) {
        RetAttrs |= llvm::Attribute::ZExt;
      }
    }
    break;

  case ABIArgInfo::StructRet:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
  for (++begin; begin != end; ++begin) {
    QualType ParamType = *begin;
    unsigned Attributes = 0;
    ABIArgInfo AI = getABIArgumentInfo(ParamType, getTypes());

    switch (AI.getKind()) {
    case ABIArgInfo::StructRet:
    case ABIArgInfo::Coerce:
      assert(0 && "Invalid ABI kind for non-return argument");

    case ABIArgInfo::ByVal:
      Attributes |= llvm::Attribute::ByVal;
      assert(AI.getByValAlignment() == 0 && "FIXME: alignment unhandled");
      break;

    case ABIArgInfo::Default:
      if (ParamType->isPromotableIntegerType()) {
        if (ParamType->isSignedIntegerType()) {
          Attributes |= llvm::Attribute::SExt;
        } else if (ParamType->isUnsignedIntegerType()) {
          Attributes |= llvm::Attribute::ZExt;
        }
      }
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need
      // to do anything here? The result should be just reconstructed
      // on the other side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

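// Illustrative example: for 'struct S f(short x);' the list built
// above contains, roughly, sret+noalias on parameter 1 (the hidden
// result pointer) and signext on parameter 2 (the promotable 'short'),
// plus any function-level attributes at the ~0 index.
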
void CodeGenFunction::EmitFunctionProlog(llvm::Function *Fn,
                                         QualType RetTy,
                                         const FunctionArgList &Args) {
  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(RetTy)) {
    AI->setName("agg.result");
    ++AI;
  }

  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *Arg = i->first;
    QualType Ty = i->second;
    ABIArgInfo ArgI = getABIArgumentInfo(Ty, CGM.getTypes());

    switch (ArgI.getKind()) {
    case ABIArgInfo::ByVal:
    case ABIArgInfo::Default: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value *V = AI;
      if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
        // This must be a promotion, for something like
        // "void a(x) short x; {..."
        V = EmitScalarConversion(V, Ty, Arg->getType());
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertType(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, 0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
    case ABIArgInfo::StructRet:
      assert(0 && "Invalid ABI kind for non-return argument");
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are
/// not present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty);

  // If the load is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Casted);
  } else {
    assert(SrcSize < DstSize && "Coercion is losing source bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    return CGF.Builder.CreateLoad(Tmp);
  }
}

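// Illustrative example: loading a 32-bit 'struct { int x; }' as i64
// takes the memory path above; the value is copied into an i64-sized
// temporary and reloaded, leaving the upper 32 bits undefined.
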
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy);

  // If the store is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    CGF.Builder.CreateStore(Src, Casted);
  } else {
    assert(SrcSize > DstSize && "Coercion is missing bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(Casted), DstPtr);
  }
}

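// Illustrative example: storing an i64 call result into a 32-bit
// 'struct { int x; }' slot takes the memory path in the other
// direction; the i64 is spilled to a temporary and only the bits that
// fit the destination are copied back out.
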
void CodeGenFunction::EmitFunctionEpilog(QualType RetTy,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    ABIArgInfo RetAI = getABIReturnInfo(RetTy, CGM.getTypes());

    switch (RetAI.getKind()) {
    case ABIArgInfo::StructRet:
      if (RetTy->isAnyComplexType()) {
        // FIXME: Volatile
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        Builder.CreateStore(Builder.CreateLoad(ReturnValue),
                            CurFn->arg_begin());
      }
      break;

    case ABIArgInfo::Default:
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::ByVal:
    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

RValue CodeGenFunction::EmitCall(llvm::Value *Callee,
                                 QualType RetTy,
                                 const CallArgList &CallArgs) {
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  ABIArgInfo RetAI = getABIReturnInfo(RetTy, CGM.getTypes());
  switch (RetAI.getKind()) {
  case ABIArgInfo::StructRet:
    // Create a temporary alloca to hold the result of the call. :(
    Args.push_back(CreateTempAlloca(ConvertType(RetTy)));
    break;

  case ABIArgInfo::Default:
  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I) {
    ABIArgInfo ArgInfo = getABIArgumentInfo(I->second, CGM.getTypes());
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::ByVal: // Default is byval
    case ABIArgInfo::Default:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertType(I->second)));
        StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::StructRet:
    case ABIArgInfo::Coerce:
      assert(0 && "Invalid ABI kind for non-return argument");
      break;

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  llvm::CallInst *CI =
    Builder.CreateCall(Callee, &Args[0], &Args[0] + Args.size());
  const llvm::Type *FnType =
    cast<llvm::PointerType>(Callee->getType())->getElementType();
  CGFunctionInfo CallInfo(RetTy, CallArgs,
                          cast<llvm::FunctionType>(FnType)->isVarArg());

  // FIXME: Provide TargetDecl so nounwind, noreturn, etc, etc get set.
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(0, CallInfo, AttributeList);
  CI->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
                                           AttributeList.size()));

  if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
    CI->setCallingConv(F->getCallingConv());
  if (CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::StructRet:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    else
      return RValue::get(Builder.CreateLoad(Args[0]));

  case ABIArgInfo::Default:
    return RValue::get(RetTy->isVoidType() ? 0 : CI);

  case ABIArgInfo::Ignore:
    if (RetTy->isVoidType())
      return RValue::get(0);
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *Res =
        llvm::UndefValue::get(llvm::PointerType::getUnqual(ConvertType(RetTy)));
      return RValue::getAggregate(Res);
    }
    return RValue::get(llvm::UndefValue::get(ConvertType(RetTy)));

  case ABIArgInfo::Coerce: {
    llvm::Value *V = CreateTempAlloca(ConvertType(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    else
      return RValue::get(Builder.CreateLoad(V));
  }

  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}