CGCall.cpp revision 2969a0243b0939286a45d0ba58dd3e1de226ac60
1//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliance.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "CodeGenFunction.h"
17#include "CodeGenModule.h"
18#include "clang/Basic/TargetInfo.h"
19#include "clang/AST/ASTContext.h"
20#include "clang/AST/Decl.h"
21#include "clang/AST/DeclObjC.h"
22#include "clang/AST/RecordLayout.h"
23#include "llvm/ADT/StringExtras.h"
24#include "llvm/Attributes.h"
25#include "llvm/Support/CommandLine.h"
26#include "llvm/Support/raw_ostream.h"
27#include "llvm/Target/TargetData.h"
28
29#include "ABIInfo.h"
30
31using namespace clang;
32using namespace CodeGen;
33
34/***/
35
36// FIXME: Use iterator and sidestep silly type array creation.
37
38const
39CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionTypeNoProto *FTNP) {
40  return getFunctionInfo(FTNP->getResultType(),
41                         llvm::SmallVector<QualType, 16>());
42}
43
44const
45CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionTypeProto *FTP) {
46  llvm::SmallVector<QualType, 16> ArgTys;
47  // FIXME: Kill copy.
48  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
49    ArgTys.push_back(FTP->getArgType(i));
50  return getFunctionInfo(FTP->getResultType(), ArgTys);
51}
52
53const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
54  const FunctionType *FTy = FD->getType()->getAsFunctionType();
55  if (const FunctionTypeProto *FTP = dyn_cast<FunctionTypeProto>(FTy))
56    return getFunctionInfo(FTP);
57  return getFunctionInfo(cast<FunctionTypeNoProto>(FTy));
58}
59
60const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
61  llvm::SmallVector<QualType, 16> ArgTys;
62  ArgTys.push_back(MD->getSelfDecl()->getType());
63  ArgTys.push_back(Context.getObjCSelType());
64  // FIXME: Kill copy?
65  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
66         e = MD->param_end(); i != e; ++i)
67    ArgTys.push_back((*i)->getType());
68  return getFunctionInfo(MD->getResultType(), ArgTys);
69}
70
71const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
72                                                    const CallArgList &Args) {
73  // FIXME: Kill copy.
74  llvm::SmallVector<QualType, 16> ArgTys;
75  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
76       i != e; ++i)
77    ArgTys.push_back(i->second);
78  return getFunctionInfo(ResTy, ArgTys);
79}
80
81const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
82                                                  const FunctionArgList &Args) {
83  // FIXME: Kill copy.
84  llvm::SmallVector<QualType, 16> ArgTys;
85  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
86       i != e; ++i)
87    ArgTys.push_back(i->second);
88  return getFunctionInfo(ResTy, ArgTys);
89}
90
91const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
92                               const llvm::SmallVector<QualType, 16> &ArgTys) {
93  // Lookup or create unique function info.
94  llvm::FoldingSetNodeID ID;
95  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());
96
97  void *InsertPos = 0;
98  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
99  if (FI)
100    return *FI;
101
102  // Construct the function info.
103  FI = new CGFunctionInfo(ResTy, ArgTys);
104  FunctionInfos.InsertNode(FI, InsertPos);
105
106  // Compute ABI information.
107  getABIInfo().computeInfo(*FI, getContext());
108
109  return *FI;
110}
111
112/***/
113
114ABIInfo::~ABIInfo() {}
115
116void ABIArgInfo::dump() const {
117  fprintf(stderr, "(ABIArgInfo Kind=");
118  switch (TheKind) {
119  case Direct:
120    fprintf(stderr, "Direct");
121    break;
122  case Ignore:
123    fprintf(stderr, "Ignore");
124    break;
125  case Coerce:
126    fprintf(stderr, "Coerce Type=");
127    getCoerceToType()->print(llvm::errs());
128    // FIXME: This is ridiculous.
129    llvm::errs().flush();
130    break;
131  case Indirect:
132    fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
133    break;
134  case Expand:
135    fprintf(stderr, "Expand");
136    break;
137  }
138  fprintf(stderr, ")\n");
139}
140
141/***/
142
143/// isEmptyStruct - Return true iff a structure has no non-empty
144/// members. Note that a structure with a flexible array member is not
145/// considered empty.
146static bool isEmptyStruct(QualType T) {
147  const RecordType *RT = T->getAsStructureType();
148  if (!RT)
149    return false;
150  const RecordDecl *RD = RT->getDecl();
151  if (RD->hasFlexibleArrayMember())
152    return false;
153  for (RecordDecl::field_iterator i = RD->field_begin(),
154         e = RD->field_end(); i != e; ++i) {
155    const FieldDecl *FD = *i;
156    if (!isEmptyStruct(FD->getType()))
157      return false;
158  }
159  return true;
160}
161
162/// isSingleElementStruct - Determine if a structure is a "single
163/// element struct", i.e. it has exactly one non-empty field or
164/// exactly one field which is itself a single element
165/// struct. Structures with flexible array members are never
166/// considered single element structs.
167///
168/// \return The field declaration for the single non-empty field, if
169/// it exists.
170static const FieldDecl *isSingleElementStruct(QualType T) {
171  const RecordType *RT = T->getAsStructureType();
172  if (!RT)
173    return 0;
174
175  const RecordDecl *RD = RT->getDecl();
176  if (RD->hasFlexibleArrayMember())
177    return 0;
178
179  const FieldDecl *Found = 0;
180  for (RecordDecl::field_iterator i = RD->field_begin(),
181         e = RD->field_end(); i != e; ++i) {
182    const FieldDecl *FD = *i;
183    QualType FT = FD->getType();
184
185    if (isEmptyStruct(FT)) {
186      // Ignore
187    } else if (Found) {
188      return 0;
189    } else if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
190      Found = FD;
191    } else {
192      Found = isSingleElementStruct(FT);
193      if (!Found)
194        return 0;
195    }
196  }
197
198  return Found;
199}
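
// Illustrative note (not part of the original source): by the rules above,
//   struct Inner { double d; };
//   struct Outer { struct Inner i; };
// make Outer a "single element struct"; isSingleElementStruct(Outer) recurses
// into Inner and returns the FieldDecl for 'd'. A struct with two non-empty
// fields, or with a flexible array member, yields a null result instead.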
200
201static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
202  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
203    return false;
204
205  uint64_t Size = Context.getTypeSize(Ty);
206  return Size == 32 || Size == 64;
207}
208
209static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
210                                           ASTContext &Context) {
211  for (RecordDecl::field_iterator i = RD->field_begin(),
212         e = RD->field_end(); i != e; ++i) {
213    const FieldDecl *FD = *i;
214
215    if (!is32Or64BitBasicType(FD->getType(), Context))
216      return false;
217
218    // If this is a bit-field we need to make sure it is still a
219    // 32-bit or 64-bit type.
220    if (Expr *BW = FD->getBitWidth()) {
221      unsigned Width = BW->getIntegerConstantExprValue(Context).getZExtValue();
222      if (Width != 32 && Width != 64)
223        return false;
224    }
225  }
226  return true;
227}
228
229namespace {
230/// DefaultABIInfo - The default implementation for ABI specific
231/// details. This implementation provides information which results in
232/// self-consistent and sensible LLVM IR generation, but does not
233/// conform to any particular ABI.
234class DefaultABIInfo : public ABIInfo {
235  ABIArgInfo classifyReturnType(QualType RetTy,
236                                ASTContext &Context) const;
237
238  ABIArgInfo classifyArgumentType(QualType RetTy,
239                                  ASTContext &Context) const;
240
241  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
242    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
243    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
244         it != ie; ++it)
245      it->info = classifyArgumentType(it->type, Context);
246  }
247};
248
249/// X86_32ABIInfo - The X86-32 ABI information.
250class X86_32ABIInfo : public ABIInfo {
251public:
252  ABIArgInfo classifyReturnType(QualType RetTy,
253                                ASTContext &Context) const;
254
255  ABIArgInfo classifyArgumentType(QualType RetTy,
256                                  ASTContext &Context) const;
257
258  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
259    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
260    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
261         it != ie; ++it)
262      it->info = classifyArgumentType(it->type, Context);
263  }
264};
265}
266
267ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
268                                            ASTContext &Context) const {
269  if (RetTy->isVoidType()) {
270    return ABIArgInfo::getIgnore();
271  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
272    // Classify "single element" structs as their element type.
273    const FieldDecl *SeltFD = isSingleElementStruct(RetTy);
274    if (SeltFD) {
275      QualType SeltTy = SeltFD->getType()->getDesugaredType();
276      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
277        // FIXME: This is gross, it would be nice if we could just
278        // pass back SeltTy and have clients deal with it. Is it worth
279        // supporting coerce to both LLVM and clang Types?
280        if (BT->isIntegerType()) {
281          uint64_t Size = Context.getTypeSize(SeltTy);
282          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
283        } else if (BT->getKind() == BuiltinType::Float) {
284          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
285        } else if (BT->getKind() == BuiltinType::Double) {
286          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
287        }
288      } else if (SeltTy->isPointerType()) {
289        // FIXME: It would be really nice if this could come out as
290        // the proper pointer type.
291        const llvm::Type *PtrTy =
292          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
293        return ABIArgInfo::getCoerce(PtrTy);
294      }
295    }
296
297    uint64_t Size = Context.getTypeSize(RetTy);
298    if (Size == 8) {
299      return ABIArgInfo::getCoerce(llvm::Type::Int8Ty);
300    } else if (Size == 16) {
301      return ABIArgInfo::getCoerce(llvm::Type::Int16Ty);
302    } else if (Size == 32) {
303      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
304    } else if (Size == 64) {
305      return ABIArgInfo::getCoerce(llvm::Type::Int64Ty);
306    } else {
307      return ABIArgInfo::getIndirect(0);
308    }
309  } else {
310    return ABIArgInfo::getDirect();
311  }
312}
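
// Illustrative examples (not part of the original source) of how the routine
// above lowers X86-32 return types:
//   struct S1 { float f; };     // single element struct -> coerced to float
//   struct S2 { short a, b; };  // 32 bits               -> coerced to i32
//   struct S3 { int a, b, c; }; // 96 bits               -> indirect (sret)
// Non-aggregate return types simply take the Direct path.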
313
314ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
315                                              ASTContext &Context) const {
316  // FIXME: Set alignment on indirect arguments.
317  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
318    // Structures with flexible arrays are always indirect.
319    if (const RecordType *RT = Ty->getAsStructureType())
320      if (RT->getDecl()->hasFlexibleArrayMember())
321        return ABIArgInfo::getIndirect(0);
322
323    // Ignore empty structs.
324    uint64_t Size = Context.getTypeSize(Ty);
325    if (Ty->isStructureType() && Size == 0)
326      return ABIArgInfo::getIgnore();
327
328    // Expand structs with size <= 128 bits which consist only of
329    // basic types (int, long long, float, double, pointers). This is
330    // non-recursive and does not ignore empty fields.
331    if (const RecordType *RT = Ty->getAsStructureType()) {
332      if (Context.getTypeSize(Ty) <= 4*32 &&
333          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
334        return ABIArgInfo::getExpand();
335    }
336
337    return ABIArgInfo::getIndirect(0);
338  } else {
339    return ABIArgInfo::getDirect();
340  }
341}
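
// Illustrative example (not part of the original source): under the checks
// above, an argument of type
//   struct Point { int x, y; };
// is 64 bits of 32-bit basic fields and is therefore Expanded into two i32
// parameters, whereas
//   struct Big { int v[5]; };
// is larger than 128 bits (and its field is not a basic type), so it is
// passed indirectly and later marked byval by ConstructAttributeList.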
342
343namespace {
344/// X86_64ABIInfo - The X86_64 ABI information.
345class X86_64ABIInfo : public ABIInfo {
346  enum Class {
347    Integer = 0,
348    SSE,
349    SSEUp,
350    X87,
351    X87Up,
352    ComplexX87,
353    NoClass,
354    Memory
355  };
356
357  /// merge - Implement the X86_64 ABI merging algorithm.
358  ///
359  /// Merge an accumulating classification \arg Accum with a field
360  /// classification \arg Field.
361  ///
362  /// \param Accum - The accumulating classification. This should
363  /// always be either NoClass or the result of a previous merge
364  /// call. In addition, this should never be Memory (the caller
365  /// should just return Memory for the aggregate).
366  Class merge(Class Accum, Class Field) const;
367
368  /// classify - Determine the x86_64 register classes in which the
369  /// given type T should be passed.
370  ///
371  /// \param Lo - The classification for the parts of the type
372  /// residing in the low word of the containing object.
373  ///
374  /// \param Hi - The classification for the parts of the type
375  /// residing in the high word of the containing object.
376  ///
377  /// \param OffsetBase - The bit offset of this type in the
378  /// containing object.  Some parameters are classified differently
379  /// depending on whether they straddle an eightbyte boundary.
380  ///
381  /// If a word is unused its result will be NoClass; if a type should
382  /// be passed in Memory then at least the classification of \arg Lo
383  /// will be Memory.
384  ///
385  /// The \arg Lo class will be NoClass iff the argument is ignored.
386  ///
387  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
388  /// be NoClass.
389  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
390                Class &Lo, Class &Hi) const;
391
392  ABIArgInfo classifyReturnType(QualType RetTy,
393                                ASTContext &Context) const;
394
395  ABIArgInfo classifyArgumentType(QualType Ty,
396                                  ASTContext &Context,
397                                  unsigned &freeIntRegs,
398                                  unsigned &freeSSERegs) const;
399
400public:
401  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
402};
403}
404
405X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
406                                          Class Field) const {
407  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
408  // classified recursively so that always two fields are
409  // considered. The resulting class is calculated according to
410  // the classes of the fields in the eightbyte:
411  //
412  // (a) If both classes are equal, this is the resulting class.
413  //
414  // (b) If one of the classes is NO_CLASS, the resulting class is
415  // the other class.
416  //
417  // (c) If one of the classes is MEMORY, the result is the MEMORY
418  // class.
419  //
420  // (d) If one of the classes is INTEGER, the result is the
421  // INTEGER.
422  //
423  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
424  // MEMORY is used as class.
425  //
426  // (f) Otherwise class SSE is used.
427  assert((Accum == NoClass || Accum == Integer ||
428          Accum == SSE || Accum == SSEUp) &&
429         "Invalid accumulated classification during merge.");
430  if (Accum == Field || Field == NoClass)
431    return Accum;
432  else if (Field == Memory)
433    return Memory;
434  else if (Accum == NoClass)
435    return Field;
436  else if (Accum == Integer || Field == Integer)
437    return Integer;
438  else if (Field == X87 || Field == X87Up || Field == ComplexX87)
439    return Memory;
440  else
441    return SSE;
442}
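
// Worked example (illustrative, not part of the original source): for an
// eightbyte containing
//   struct { int i; float f; };
// the fields classify as Integer and SSE. Starting from NoClass,
// merge(NoClass, Integer) gives Integer (rule b) and merge(Integer, SSE)
// gives Integer again (rule d), so the whole eightbyte is passed in a
// general purpose register.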
443
444void X86_64ABIInfo::classify(QualType Ty,
445                             ASTContext &Context,
446                             uint64_t OffsetBase,
447                             Class &Lo, Class &Hi) const {
448  // FIXME: This code can be simplified by introducing a simple value
449  // class for Class pairs with appropriate constructor methods for
450  // the various situations.
451
452  Lo = Hi = NoClass;
453
454  Class &Current = OffsetBase < 64 ? Lo : Hi;
455  Current = Memory;
456
457  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
458    BuiltinType::Kind k = BT->getKind();
459
460    if (k == BuiltinType::Void) {
461      Current = NoClass;
462    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
463      Current = Integer;
464    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
465      Current = SSE;
466    } else if (k == BuiltinType::LongDouble) {
467      Lo = X87;
468      Hi = X87Up;
469    }
470    // FIXME: _Decimal32 and _Decimal64 are SSE.
471    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
472    // FIXME: __int128 is (Integer, Integer).
473  } else if (Ty->isPointerLikeType() || Ty->isBlockPointerType() ||
474             Ty->isObjCQualifiedInterfaceType()) {
475    Current = Integer;
476  } else if (const VectorType *VT = Ty->getAsVectorType()) {
477    uint64_t Size = Context.getTypeSize(VT);
478    if (Size == 64) {
479      // gcc passes <1 x double> in memory.
480      if (VT->getElementType() == Context.DoubleTy)
481        return;
482
483      Current = SSE;
484
485      // If this type crosses an eightbyte boundary, it should be
486      // split.
487      if (OffsetBase && OffsetBase != 64)
488        Hi = Lo;
489    } else if (Size == 128) {
490      Lo = SSE;
491      Hi = SSEUp;
492    }
493  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
494    QualType ET = CT->getElementType();
495
496    uint64_t Size = Context.getTypeSize(Ty);
497    if (ET->isIntegerType()) {
498      if (Size <= 64)
499        Current = Integer;
500      else if (Size <= 128)
501        Lo = Hi = Integer;
502    } else if (ET == Context.FloatTy)
503      Current = SSE;
504    else if (ET == Context.DoubleTy)
505      Lo = Hi = SSE;
506    else if (ET == Context.LongDoubleTy)
507      Current = ComplexX87;
508
509    // If this complex type crosses an eightbyte boundary then it
510    // should be split.
511    uint64_t EB_Real = (OffsetBase) / 64;
512    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
513    if (Hi == NoClass && EB_Real != EB_Imag)
514      Hi = Lo;
515  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
516    // Arrays are treated like structures.
517
518    uint64_t Size = Context.getTypeSize(Ty);
519
520    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
521    // than two eightbytes, ..., it has class MEMORY.
522    if (Size > 128)
523      return;
524
525    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
526    // fields, it has class MEMORY.
527    //
528    // Only need to check alignment of array base.
529    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
530      return;
531
532    // Otherwise implement simplified merge. We could be smarter about
533    // this, but it isn't worth it and would be harder to verify.
534    Current = NoClass;
535    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
536    uint64_t ArraySize = AT->getSize().getZExtValue();
537    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
538      Class FieldLo, FieldHi;
539      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
540      Lo = merge(Lo, FieldLo);
541      Hi = merge(Hi, FieldHi);
542      if (Lo == Memory || Hi == Memory)
543        break;
544    }
545
546    // Do post-merger cleanup (see below); we only worry about Memory here.
547    if (Hi == Memory)
548      Lo = Memory;
549    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
550  } else if (const RecordType *RT = Ty->getAsRecordType()) {
551    uint64_t Size = Context.getTypeSize(Ty);
552
553    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
554    // than two eightbytes, ..., it has class MEMORY.
555    if (Size > 128)
556      return;
557
558    const RecordDecl *RD = RT->getDecl();
559
560    // Assume variable sized types are passed in memory.
561    if (RD->hasFlexibleArrayMember())
562      return;
563
564    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
565
566    // Reset Lo class, this will be recomputed.
567    Current = NoClass;
568    unsigned idx = 0;
569    for (RecordDecl::field_iterator i = RD->field_begin(),
570           e = RD->field_end(); i != e; ++i, ++idx) {
571      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
572
573      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
574      // fields, it has class MEMORY.
575      if (Offset % Context.getTypeAlign(i->getType())) {
576        Lo = Memory;
577        return;
578      }
579
580      // Classify this field.
581      //
582      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
583      // exceeds a single eightbyte, each is classified
584      // separately. Each eightbyte gets initialized to class
585      // NO_CLASS.
586      Class FieldLo, FieldHi;
587      classify(i->getType(), Context, Offset, FieldLo, FieldHi);
588      Lo = merge(Lo, FieldLo);
589      Hi = merge(Hi, FieldHi);
590      if (Lo == Memory || Hi == Memory)
591        break;
592    }
593
594    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
595    //
596    // (a) If one of the classes is MEMORY, the whole argument is
597    // passed in memory.
598    //
599    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
600
601    // The first of these conditions is guaranteed by how we implement
602    // the merge (just bail).
603    //
604    // The second condition occurs in the case of unions; for example
605    // union { _Complex double; unsigned; }.
606    if (Hi == Memory)
607      Lo = Memory;
608    if (Hi == SSEUp && Lo != SSE)
609      Hi = SSE;
610  }
611}
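
// Worked example (illustrative, not part of the original source): for
//   struct S { int a; int b; double d; };  // 16 bytes
// the two ints fill the first eightbyte and classify it Integer, while the
// double at bit offset 64 classifies the second eightbyte SSE, so classify()
// produces Lo = Integer, Hi = SSE. A record larger than 128 bits, or one
// with an unaligned field, is classified Memory.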
612
613
614ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
615                                            ASTContext &Context) const {
616  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
617  // classification algorithm.
618  X86_64ABIInfo::Class Lo, Hi;
619  classify(RetTy, Context, 0, Lo, Hi);
620
621  // Check some invariants.
622  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
623  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
624  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
625
626  const llvm::Type *ResType = 0;
627  switch (Lo) {
628  case NoClass:
629    return ABIArgInfo::getIgnore();
630
631  case SSEUp:
632  case X87Up:
633    assert(0 && "Invalid classification for lo word.");
634
635    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
636    // hidden argument.
637  case Memory:
638    return ABIArgInfo::getIndirect(0);
639
640    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
641    // available register of the sequence %rax, %rdx is used.
642  case Integer:
643    ResType = llvm::Type::Int64Ty; break;
644
645    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
646    // available SSE register of the sequence %xmm0, %xmm1 is used.
647  case SSE:
648    ResType = llvm::Type::DoubleTy; break;
649
650    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
651    // returned on the X87 stack in %st0 as 80-bit x87 number.
652  case X87:
653    ResType = llvm::Type::X86_FP80Ty; break;
654
655    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
656    // part of the value is returned in %st0 and the imaginary part in
657    // %st1.
658  case ComplexX87:
659    assert(Hi == NoClass && "Unexpected ComplexX87 classification.");
660    ResType = llvm::VectorType::get(llvm::Type::X86_FP80Ty, 2);
661    break;
662  }
663
664  switch (Hi) {
665    // Memory was handled previously, and ComplexX87 and X87 should
666    // never occur as hi classes.
667  case Memory:
668  case X87:
669  case ComplexX87:
670    assert(0 && "Invalid classification for hi word.");
671
672  case NoClass: break;
673  case Integer:
674    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
675    break;
676  case SSE:
677    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
678    break;
679
680    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
681    // is passed in the upper half of the last used SSE register.
682    //
683    // SSEUP should always be preceded by SSE, just widen.
684  case SSEUp:
685    assert(Lo == SSE && "Unexpected SSEUp classification.");
686    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
687    break;
688
689    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
690    // returned together with the previous X87 value in %st0.
691    //
692    // X87UP should always be preceded by X87, so we don't need to do
693    // anything here.
694  case X87Up:
695    assert(Lo == X87 && "Unexpected X87Up classification.");
696    break;
697  }
698
699  return ABIArgInfo::getCoerce(ResType);
700}
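
// Illustrative example (not part of the original source): with the
// classification Lo = Integer, Hi = SSE from the example above, the struct
// is returned coerced to the LLVM type { i64, double }, i.e. in %rax and
// %xmm0; a type classified Memory is instead returned through a hidden
// sret pointer.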
701
702ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
703                                               unsigned &freeIntRegs,
704                                               unsigned &freeSSERegs) const {
705  X86_64ABIInfo::Class Lo, Hi;
706  classify(Ty, Context, 0, Lo, Hi);
707
708  // Check some invariants.
709  // FIXME: Enforce these by construction.
710  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
711  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
712  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
713
714  unsigned neededInt = 0, neededSSE = 0;
715  const llvm::Type *ResType = 0;
716  switch (Lo) {
717  case NoClass:
718    return ABIArgInfo::getIgnore();
719
720    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
721    // on the stack.
722  case Memory:
723
724    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
725    // COMPLEX_X87, it is passed in memory.
726  case X87:
727  case ComplexX87:
728    // Choose appropriate in memory type.
729    if (CodeGenFunction::hasAggregateLLVMType(Ty))
730      return ABIArgInfo::getIndirect(0);
731    else
732      return ABIArgInfo::getDirect();
733
734  case SSEUp:
735  case X87Up:
736    assert(0 && "Invalid classification for lo word.");
737
738    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
739    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
740    // and %r9 is used.
741  case Integer:
742    ++neededInt;
743    ResType = llvm::Type::Int64Ty;
744    break;
745
746    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
747    // available SSE register is used, the registers are taken in the
748    // order from %xmm0 to %xmm7.
749  case SSE:
750    ++neededSSE;
751    ResType = llvm::Type::DoubleTy;
752    break;
753  }
754
755  switch (Hi) {
756    // Memory was handled previously, ComplexX87 and X87 should
757    // never occur as hi classes, and X87Up must be preceded by X87,
758    // which is passed in memory.
759  case Memory:
760  case X87:
761  case X87Up:
762  case ComplexX87:
763    assert(0 && "Invalid classification for hi word.");
764
765  case NoClass: break;
766  case Integer:
767    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
768    ++neededInt;
769    break;
770  case SSE:
771    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
772    ++neededSSE;
773    break;
774
775    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
776    // eightbyte is passed in the upper half of the last used SSE
777    // register.
778  case SSEUp:
779    assert(Lo == SSE && "Unexpected SSEUp classification.");
780    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
781    break;
782  }
783
784  // AMD64-ABI 3.2.3p3: If there are no registers available for any
785  // eightbyte of an argument, the whole argument is passed on the
786  // stack. If registers have already been assigned for some
787  // eightbytes of such an argument, the assignments get reverted.
788  if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
789    freeIntRegs -= neededInt;
790    freeSSERegs -= neededSSE;
791    return ABIArgInfo::getCoerce(ResType);
792  } else {
793    // Choose appropriate in memory type.
794    if (CodeGenFunction::hasAggregateLLVMType(Ty))
795      return ABIArgInfo::getIndirect(0);
796    else
797      return ABIArgInfo::getDirect();
798  }
799}
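
// Illustrative note (not part of the original source): the register
// accounting above is what makes argument lowering position dependent. The
// first six Integer-class eightbytes consume %rdi, %rsi, %rdx, %rcx, %r8 and
// %r9; once freeIntRegs reaches zero, a further Integer-classified struct
// argument no longer fits and falls back to being passed in memory
// (indirectly if it is an aggregate, directly otherwise).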
800
801void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
802  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
803
804  // Keep track of the number of assigned registers.
805  unsigned freeIntRegs = 6, freeSSERegs = 8;
806
807  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
808  // get assigned (in left-to-right order) for passing as follows...
809  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
810       it != ie; ++it)
811    it->info = classifyArgumentType(it->type, Context, freeIntRegs, freeSSERegs);
812}
813
814ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
815                                            ASTContext &Context) const {
816  if (RetTy->isVoidType()) {
817    return ABIArgInfo::getIgnore();
818  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
819    return ABIArgInfo::getIndirect(0);
820  } else {
821    return ABIArgInfo::getDirect();
822  }
823}
824
825ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
826                                              ASTContext &Context) const {
827  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
828    return ABIArgInfo::getIndirect(0);
829  } else {
830    return ABIArgInfo::getDirect();
831  }
832}
833
834const ABIInfo &CodeGenTypes::getABIInfo() const {
835  if (TheABIInfo)
836    return *TheABIInfo;
837
838  // For now we just cache this in the CodeGenTypes and don't bother
839  // to free it.
840  const char *TargetPrefix = getContext().Target.getTargetPrefix();
841  if (strcmp(TargetPrefix, "x86") == 0) {
842    switch (getContext().Target.getPointerWidth(0)) {
843    case 32:
844      return *(TheABIInfo = new X86_32ABIInfo());
845    case 64:
846      return *(TheABIInfo = new X86_64ABIInfo());
847    }
848  }
849
850  return *(TheABIInfo = new DefaultABIInfo);
851}
852
853/***/
854
855CGFunctionInfo::CGFunctionInfo(QualType ResTy,
856                               const llvm::SmallVector<QualType, 16> &ArgTys) {
857  NumArgs = ArgTys.size();
858  Args = new ArgInfo[1 + NumArgs];
859  Args[0].type = ResTy;
860  for (unsigned i = 0; i < NumArgs; ++i)
861    Args[1 + i].type = ArgTys[i];
862}
863
864/***/
865
866void CodeGenTypes::GetExpandedTypes(QualType Ty,
867                                    std::vector<const llvm::Type*> &ArgTys) {
868  const RecordType *RT = Ty->getAsStructureType();
869  assert(RT && "Can only expand structure types.");
870  const RecordDecl *RD = RT->getDecl();
871  assert(!RD->hasFlexibleArrayMember() &&
872         "Cannot expand structure with flexible array.");
873
874  for (RecordDecl::field_iterator i = RD->field_begin(),
875         e = RD->field_end(); i != e; ++i) {
876    const FieldDecl *FD = *i;
877    assert(!FD->isBitField() &&
878           "Cannot expand structure with bit-field members.");
879
880    QualType FT = FD->getType();
881    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
882      GetExpandedTypes(FT, ArgTys);
883    } else {
884      ArgTys.push_back(ConvertType(FT));
885    }
886  }
887}
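
// Illustrative example (not part of the original source): expanding
//   struct Pair { int i; float f; };
// appends i32 and float to ArgTys; an aggregate member would be flattened
// recursively into its own scalar fields.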
888
889llvm::Function::arg_iterator
890CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
891                                    llvm::Function::arg_iterator AI) {
892  const RecordType *RT = Ty->getAsStructureType();
893  assert(RT && "Can only expand structure types.");
894
895  RecordDecl *RD = RT->getDecl();
896  assert(LV.isSimple() &&
897         "Unexpected non-simple lvalue during struct expansion.");
898  llvm::Value *Addr = LV.getAddress();
899  for (RecordDecl::field_iterator i = RD->field_begin(),
900         e = RD->field_end(); i != e; ++i) {
901    FieldDecl *FD = *i;
902    QualType FT = FD->getType();
903
904    // FIXME: What are the right qualifiers here?
905    LValue LV = EmitLValueForField(Addr, FD, false, 0);
906    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
907      AI = ExpandTypeFromArgs(FT, LV, AI);
908    } else {
909      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
910      ++AI;
911    }
912  }
913
914  return AI;
915}
916
917void
918CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
919                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
920  const RecordType *RT = Ty->getAsStructureType();
921  assert(RT && "Can only expand structure types.");
922
923  RecordDecl *RD = RT->getDecl();
924  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
925  llvm::Value *Addr = RV.getAggregateAddr();
926  for (RecordDecl::field_iterator i = RD->field_begin(),
927         e = RD->field_end(); i != e; ++i) {
928    FieldDecl *FD = *i;
929    QualType FT = FD->getType();
930
931    // FIXME: What are the right qualifiers here?
932    LValue LV = EmitLValueForField(Addr, FD, false, 0);
933    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
934      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
935    } else {
936      RValue RV = EmitLoadOfLValue(LV, FT);
937      assert(RV.isScalar() &&
938             "Unexpected non-scalar rvalue during struct expansion.");
939      Args.push_back(RV.getScalarVal());
940    }
941  }
942}
943
944/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
945/// a pointer to an object of type \arg Ty.
946///
947/// This safely handles the case when the src type is smaller than the
948/// destination type; in this situation the values of bits which are not
949/// present in the src are undefined.
950static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
951                                      const llvm::Type *Ty,
952                                      CodeGenFunction &CGF) {
953  const llvm::Type *SrcTy =
954    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
955  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
956  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty);
957
958  // If load is legal, just bitcast the src pointer.
959  if (SrcSize == DstSize) {
960    llvm::Value *Casted =
961      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
962    return CGF.Builder.CreateLoad(Casted);
963  } else {
964    assert(SrcSize < DstSize && "Coercion is losing source bits!");
965
966    // Otherwise do coercion through memory. This is stupid, but
967    // simple.
968    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
969    llvm::Value *Casted =
970      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
971    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
972    return CGF.Builder.CreateLoad(Tmp);
973  }
974}
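
// Illustrative example (not part of the original source): coercing
//   struct { int a, b; float c; };  // i.e. { i32, i32, float }, 12 bytes
// to { i64, double } (16 bytes) takes the SrcSize < DstSize path above: the
// value is copied through a destination-sized temporary, and the bits of the
// second eightbyte that the source never wrote are left undefined.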
975
976/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
977/// where the source and destination may have different types.
978///
979/// This safely handles the case when the src type is larger than the
980/// destination type; the upper bits of the src will be lost.
981static void CreateCoercedStore(llvm::Value *Src,
982                               llvm::Value *DstPtr,
983                               CodeGenFunction &CGF) {
984  const llvm::Type *SrcTy = Src->getType();
985  const llvm::Type *DstTy =
986    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
987
988  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
989  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy);
990
991  // If store is legal, just bitcast the src pointer.
992  if (SrcSize == DstSize) {
993    llvm::Value *Casted =
994      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
995    CGF.Builder.CreateStore(Src, Casted);
996  } else {
997    assert(SrcSize > DstSize && "Coercion is missing bits!");
998
999    // Otherwise do coercion through memory. This is stupid, but
1000    // simple.
1001    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
1002    CGF.Builder.CreateStore(Src, Tmp);
1003    llvm::Value *Casted =
1004      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
1005    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(Casted), DstPtr);
1006  }
1007}
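
// Illustrative note (not part of the original source): this is the mirror of
// CreateCoercedLoad. Storing an { i64, double } call result back into the
// 12 byte struct from the example above goes through a source-sized
// temporary, and the excess high bits are simply dropped.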
1008
1009/***/
1010
1011bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
1012  return FI.getReturnInfo().isIndirect();
1013}
1014
1015const llvm::FunctionType *
1016CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
1017  std::vector<const llvm::Type*> ArgTys;
1018
1019  const llvm::Type *ResultType = 0;
1020
1021  QualType RetTy = FI.getReturnType();
1022  const ABIArgInfo &RetAI = FI.getReturnInfo();
1023  switch (RetAI.getKind()) {
1024  case ABIArgInfo::Expand:
1025    assert(0 && "Invalid ABI kind for return argument");
1026
1027  case ABIArgInfo::Direct:
1028    ResultType = ConvertType(RetTy);
1029    break;
1030
1031  case ABIArgInfo::Indirect: {
1032    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
1033    ResultType = llvm::Type::VoidTy;
1034    const llvm::Type *STy = ConvertType(RetTy);
1035    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
1036    break;
1037  }
1038
1039  case ABIArgInfo::Ignore:
1040    ResultType = llvm::Type::VoidTy;
1041    break;
1042
1043  case ABIArgInfo::Coerce:
1044    ResultType = RetAI.getCoerceToType();
1045    break;
1046  }
1047
1048  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1049         ie = FI.arg_end(); it != ie; ++it) {
1050    const ABIArgInfo &AI = it->info;
1051
1052    switch (AI.getKind()) {
1053    case ABIArgInfo::Ignore:
1054      break;
1055
1056    case ABIArgInfo::Coerce:
1057      ArgTys.push_back(AI.getCoerceToType());
1058      break;
1059
1060    case ABIArgInfo::Indirect:
1061      // indirect arguments are always on the stack, which is addr space #0.
1062      ArgTys.push_back(llvm::PointerType::getUnqual(ConvertType(it->type)));
1063      break;
1064
1065    case ABIArgInfo::Direct:
1066      ArgTys.push_back(ConvertType(it->type));
1067      break;
1068
1069    case ABIArgInfo::Expand:
1070      GetExpandedTypes(it->type, ArgTys);
1071      break;
1072    }
1073  }
1074
1075  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
1076}
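
// Illustrative example (not part of the original source): when the return is
// classified Indirect, the routine above yields a void function whose first
// parameter points at the return slot, so a declaration like
//   struct Big f(int x);
// (with Big large enough to be returned in memory) is lowered to roughly
//   void @f(%struct.Big* %agg.result, i32)
// where the sret/noalias attributes are added by ConstructAttributeList and
// the "agg.result" name by EmitFunctionProlog below.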
1077
1078void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
1079                                           const Decl *TargetDecl,
1080                                           AttributeListType &PAL) {
1081  unsigned FuncAttrs = 0;
1082  unsigned RetAttrs = 0;
1083
1084  if (TargetDecl) {
1085    if (TargetDecl->getAttr<NoThrowAttr>())
1086      FuncAttrs |= llvm::Attribute::NoUnwind;
1087    if (TargetDecl->getAttr<NoReturnAttr>())
1088      FuncAttrs |= llvm::Attribute::NoReturn;
1089    if (TargetDecl->getAttr<PureAttr>())
1090      FuncAttrs |= llvm::Attribute::ReadOnly;
1091    if (TargetDecl->getAttr<ConstAttr>())
1092      FuncAttrs |= llvm::Attribute::ReadNone;
1093  }
1094
1095  QualType RetTy = FI.getReturnType();
1096  unsigned Index = 1;
1097  const ABIArgInfo &RetAI = FI.getReturnInfo();
1098  switch (RetAI.getKind()) {
1099  case ABIArgInfo::Direct:
1100    if (RetTy->isPromotableIntegerType()) {
1101      if (RetTy->isSignedIntegerType()) {
1102        RetAttrs |= llvm::Attribute::SExt;
1103      } else if (RetTy->isUnsignedIntegerType()) {
1104        RetAttrs |= llvm::Attribute::ZExt;
1105      }
1106    }
1107    break;
1108
1109  case ABIArgInfo::Indirect:
1110    PAL.push_back(llvm::AttributeWithIndex::get(Index,
1111                                                llvm::Attribute::StructRet |
1112                                                llvm::Attribute::NoAlias));
1113    ++Index;
1114    break;
1115
1116  case ABIArgInfo::Ignore:
1117  case ABIArgInfo::Coerce:
1118    break;
1119
1120  case ABIArgInfo::Expand:
1121    assert(0 && "Invalid ABI kind for return argument");
1122  }
1123
1124  if (RetAttrs)
1125    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
1126  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1127         ie = FI.arg_end(); it != ie; ++it) {
1128    QualType ParamType = it->type;
1129    const ABIArgInfo &AI = it->info;
1130    unsigned Attributes = 0;
1131
1132    switch (AI.getKind()) {
1133    case ABIArgInfo::Coerce:
1134      break;
1135
1136    case ABIArgInfo::Indirect:
1137      Attributes |= llvm::Attribute::ByVal;
1138      Attributes |=
1139        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
1140      break;
1141
1142    case ABIArgInfo::Direct:
1143      if (ParamType->isPromotableIntegerType()) {
1144        if (ParamType->isSignedIntegerType()) {
1145          Attributes |= llvm::Attribute::SExt;
1146        } else if (ParamType->isUnsignedIntegerType()) {
1147          Attributes |= llvm::Attribute::ZExt;
1148        }
1149      }
1150      break;
1151
1152    case ABIArgInfo::Ignore:
1153      // Skip increment, no matching LLVM parameter.
1154      continue;
1155
1156    case ABIArgInfo::Expand: {
1157      std::vector<const llvm::Type*> Tys;
1158      // FIXME: This is rather inefficient. Do we ever actually need
1159      // to do anything here? The result should be just reconstructed
1160      // on the other side, so extension should be a non-issue.
1161      getTypes().GetExpandedTypes(ParamType, Tys);
1162      Index += Tys.size();
1163      continue;
1164    }
1165    }
1166
1167    if (Attributes)
1168      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
1169    ++Index;
1170  }
1171  if (FuncAttrs)
1172    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
1173
1174}
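
// Illustrative summary (not part of the original source) of the list built
// above: index 0 carries return-value attributes (sext/zext for promotable
// integer returns), index 1 gets sret|noalias when the return is Indirect,
// parameter indices get byval plus alignment for Indirect arguments and
// sext/zext for promotable integers, and the function-wide attributes
// (nounwind, noreturn, readonly, readnone) are attached at index ~0.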
1175
1176void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1177                                         llvm::Function *Fn,
1178                                         const FunctionArgList &Args) {
1179  // FIXME: We no longer need the types from FunctionArgList; lift up
1180  // and simplify.
1181
1182  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
1183  llvm::Function::arg_iterator AI = Fn->arg_begin();
1184
1185  // Name the struct return argument.
1186  if (CGM.ReturnTypeUsesSret(FI)) {
1187    AI->setName("agg.result");
1188    ++AI;
1189  }
1190
1191  assert(FI.arg_size() == Args.size() &&
1192         "Mismatch between function signature & arguments.");
1193  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1194  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1195       i != e; ++i, ++info_it) {
1196    const VarDecl *Arg = i->first;
1197    QualType Ty = info_it->type;
1198    const ABIArgInfo &ArgI = info_it->info;
1199
1200    switch (ArgI.getKind()) {
1201    case ABIArgInfo::Indirect: {
1202      llvm::Value* V = AI;
1203      if (hasAggregateLLVMType(Ty)) {
1204        // Do nothing, aggregates and complex variables are accessed by
1205        // reference.
1206      } else {
1207        // Load scalar value from indirect argument.
1208        V = Builder.CreateLoad(V);
1209        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
1210          // This must be a promotion, for something like
1211          // "void a(x) short x; {..."
1212          V = EmitScalarConversion(V, Ty, Arg->getType());
1213        }
1214      }
1215      EmitParmDecl(*Arg, V);
1216      break;
1217    }
1218
1219    case ABIArgInfo::Direct: {
1220      assert(AI != Fn->arg_end() && "Argument mismatch!");
1221      llvm::Value* V = AI;
1222      if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
1223        // This must be a promotion, for something like
1224        // "void a(x) short x; {..."
1225        V = EmitScalarConversion(V, Ty, Arg->getType());
1226      }
1227      EmitParmDecl(*Arg, V);
1228      break;
1229    }
1230
1231    case ABIArgInfo::Expand: {
1232      // If this structure was expanded into multiple arguments then
1233      // we need to create a temporary and reconstruct it from the
1234      // arguments.
1235      std::string Name = Arg->getNameAsString();
1236      llvm::Value *Temp = CreateTempAlloca(ConvertType(Ty),
1237                                           (Name + ".addr").c_str());
1238      // FIXME: What are the right qualifiers here?
1239      llvm::Function::arg_iterator End =
1240        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
1241      EmitParmDecl(*Arg, Temp);
1242
1243      // Name the arguments used in expansion and increment AI.
1244      unsigned Index = 0;
1245      for (; AI != End; ++AI, ++Index)
1246        AI->setName(Name + "." + llvm::utostr(Index));
1247      continue;
1248    }
1249
1250    case ABIArgInfo::Ignore:
1251      // Skip increment, no matching LLVM parameter.
1252      continue;
1253
1254    case ABIArgInfo::Coerce: {
1255      assert(AI != Fn->arg_end() && "Argument mismatch!");
1256      // FIXME: This is very wasteful; EmitParmDecl is just going to
1257      // drop the result in a new alloca anyway, so we could just
1258      // store into that directly if we broke the abstraction down
1259      // more.
1260      llvm::Value *V = CreateTempAlloca(ConvertType(Ty), "coerce");
1261      CreateCoercedStore(AI, V, *this);
1262      // Match to what EmitParmDecl is expecting for this type.
1263      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1264        V = Builder.CreateLoad(V);
1265        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
1266          // This must be a promotion, for something like
1267          // "void a(x) short x; {..."
1268          V = EmitScalarConversion(V, Ty, Arg->getType());
1269        }
1270      }
1271      EmitParmDecl(*Arg, V);
1272      break;
1273    }
1274    }
1275
1276    ++AI;
1277  }
1278  assert(AI == Fn->arg_end() && "Argument mismatch!");
1279}
1280
1281void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
1282                                         llvm::Value *ReturnValue) {
1283  llvm::Value *RV = 0;
1284
1285  // Functions with no result always return void.
1286  if (ReturnValue) {
1287    QualType RetTy = FI.getReturnType();
1288    const ABIArgInfo &RetAI = FI.getReturnInfo();
1289
1290    switch (RetAI.getKind()) {
1291      // FIXME: Implement correct [in]direct semantics.
1292    case ABIArgInfo::Indirect:
1293      if (RetTy->isAnyComplexType()) {
1294        // FIXME: Volatile
1295        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1296        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1297      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1298        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
1299      } else {
1300        Builder.CreateStore(Builder.CreateLoad(ReturnValue),
1301                            CurFn->arg_begin());
1302      }
1303      break;
1304
1305    case ABIArgInfo::Direct:
1306      RV = Builder.CreateLoad(ReturnValue);
1307      break;
1308
1309    case ABIArgInfo::Ignore:
1310      break;
1311
1312    case ABIArgInfo::Coerce: {
1313      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
1314      break;
1315    }
1316
1317    case ABIArgInfo::Expand:
1318      assert(0 && "Invalid ABI kind for return argument");
1319    }
1320  }
1321
1322  if (RV) {
1323    Builder.CreateRet(RV);
1324  } else {
1325    Builder.CreateRetVoid();
1326  }
1327}
1328
1329RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1330                                 llvm::Value *Callee,
1331                                 const CallArgList &CallArgs) {
1332  // FIXME: We no longer need the types from CallArgs; lift up and
1333  // simplify.
1334  llvm::SmallVector<llvm::Value*, 16> Args;
1335
1336  // Handle struct-return functions by passing a pointer to the
1337  // location that we would like to return into.
1338  QualType RetTy = CallInfo.getReturnType();
1339  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1340  if (CGM.ReturnTypeUsesSret(CallInfo)) {
1341    // Create a temporary alloca to hold the result of the call. :(
1342    Args.push_back(CreateTempAlloca(ConvertType(RetTy)));
1343  }
1344
1345  assert(CallInfo.arg_size() == CallArgs.size() &&
1346         "Mismatch between function signature & arguments.");
1347  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1348  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1349       I != E; ++I, ++info_it) {
1350    const ABIArgInfo &ArgInfo = info_it->info;
1351    RValue RV = I->first;
1352
1353    switch (ArgInfo.getKind()) {
1354    case ABIArgInfo::Indirect:
1355      if (RV.isScalar() || RV.isComplex()) {
1356        // Make a temporary alloca to pass the argument.
1357        Args.push_back(CreateTempAlloca(ConvertType(I->second)));
1358        if (RV.isScalar())
1359          Builder.CreateStore(RV.getScalarVal(), Args.back());
1360        else
1361          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1362      } else {
1363        Args.push_back(RV.getAggregateAddr());
1364      }
1365      break;
1366
1367    case ABIArgInfo::Direct:
1368      if (RV.isScalar()) {
1369        Args.push_back(RV.getScalarVal());
1370      } else if (RV.isComplex()) {
1371        // Make a temporary alloca to pass the argument.
1372        Args.push_back(CreateTempAlloca(ConvertType(I->second)));
1373        StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1374      } else {
1375        Args.push_back(RV.getAggregateAddr());
1376      }
1377      break;
1378
1379    case ABIArgInfo::Ignore:
1380      break;
1381
1382    case ABIArgInfo::Coerce: {
1383      // FIXME: Avoid the conversion through memory if possible.
1384      llvm::Value *SrcPtr;
1385      if (RV.isScalar()) {
1386        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
1387        Builder.CreateStore(RV.getScalarVal(), SrcPtr);
1388      } else if (RV.isComplex()) {
1389        SrcPtr = CreateTempAlloca(ConvertType(I->second), "coerce");
1390        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
1391      } else
1392        SrcPtr = RV.getAggregateAddr();
1393      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
1394                                       *this));
1395      break;
1396    }
1397
1398    case ABIArgInfo::Expand:
1399      ExpandTypeToArgs(I->second, RV, Args);
1400      break;
1401    }
1402  }
1403
1404  llvm::CallInst *CI = Builder.CreateCall(Callee,&Args[0],&Args[0]+Args.size());
1405
1406  // FIXME: Provide TargetDecl so nounwind, noreturn, etc. get set.
1407  CodeGen::AttributeListType AttributeList;
1408  CGM.ConstructAttributeList(CallInfo, 0, AttributeList);
1409  CI->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
1410                                           AttributeList.size()));
1411
1412  if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
1413    CI->setCallingConv(F->getCallingConv());
1414  if (CI->getType() != llvm::Type::VoidTy)
1415    CI->setName("call");
1416
1417  switch (RetAI.getKind()) {
1418    // FIXME: Implement correct [in]direct semantics.
1419  case ABIArgInfo::Indirect:
1420    if (RetTy->isAnyComplexType())
1421      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
1422    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1423      return RValue::getAggregate(Args[0]);
1424    else
1425      return RValue::get(Builder.CreateLoad(Args[0]));
1426
1427  case ABIArgInfo::Direct:
1428    assert((!RetTy->isAnyComplexType() &&
1429            !CodeGenFunction::hasAggregateLLVMType(RetTy)) &&
1430           "FIXME: Implement return for non-scalar direct types.");
1431    return RValue::get(CI);
1432
1433  case ABIArgInfo::Ignore:
1434    // If we are ignoring the return value, make sure to construct an
1435    // appropriate (undefined) return value for our caller.
1436    return GetUndefRValue(RetTy);
1439
1440  case ABIArgInfo::Coerce: {
1441    // FIXME: Avoid the conversion through memory if possible.
1442    llvm::Value *V = CreateTempAlloca(ConvertType(RetTy), "coerce");
1443    CreateCoercedStore(CI, V, *this);
1444    if (RetTy->isAnyComplexType())
1445      return RValue::getComplex(LoadComplexFromAddr(V, false));
1446    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1447      return RValue::getAggregate(V);
1448    else
1449      return RValue::get(Builder.CreateLoad(V));
1450  }
1451
1452  case ABIArgInfo::Expand:
1453    assert(0 && "Invalid ABI kind for return argument");
1454  }
1455
1456  assert(0 && "Unhandled ABIArgInfo::Kind");
1457  return RValue::get(0);
1458}
1459