TargetInfo.cpp revision 82d0a418c8699fc6f4a9417457ffe93d43bba1c1
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/Type.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

ABIInfo::~ABIInfo() {}

void ABIArgInfo::dump() const {
  llvm::raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Coerce:
    OS << "Coerce Type=";
    getCoerceToType()->print(OS);
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
      FT = AT->getElementType();

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
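///
/// For example, given "struct Empty {};", a field of type "struct Empty"
/// or "struct Empty[4]" counts as empty (the latter when AllowArrays is
/// true), as does an unnamed bit-field.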
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists;
/// otherwise null.
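///
/// For example, "struct { struct { float f[1]; } inner; }" is a single
/// element struct whose element type is "float".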
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}

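/// is32Or64BitBasicType - Return true if this is a builtin, pointer, enum,
/// complex, or block pointer type whose size is exactly 32 or 64 bits.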
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->isAnyPointerType() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
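//
// For example, "struct { int a; double b; float c; }" can be expanded: every
// field is a 32-bit or 64-bit basic type and none is a bit-field.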
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;
  }

  return true;
}

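/// typeContainsSSEVector - Return true if the record contains a vector field
/// of 128 bits or more, possibly inside a nested record; used below to decide
/// whether an indirect argument needs 16 byte alignment.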
static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;

    if (FD->getType()->isVectorType() &&
        Context.getTypeSize(FD->getType()) >= 128)
      return true;

    if (const RecordType *RT = FD->getType()->getAs<RecordType>())
      if (typeContainsSSEVector(RT->getDecl(), Context))
        return true;
  }

  return false;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo() : TargetCodeGenInfo(new DefaultABIInfo()) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  static unsigned getIndirectArgumentAlignment(QualType Ty,
                                               ASTContext &Context);

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(ASTContext &Context, bool d, bool p)
    : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
      IsSmallStructInRegABI(p) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(ASTContext &Context, bool d, bool p)
    : TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {}
};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
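///
/// For example, "struct { char a, b, c, d; }" is register sized (32 bits)
/// and every field passes the check recursively, so it is returned in a
/// register.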
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, or complex type, it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isAnyPointerType() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(VMContext), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    if (const RecordType *RT = RetTy->getAsStructureType()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure; padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(
            llvm::IntegerType::get(VMContext, (unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
      uint64_t Size = Context.getTypeSize(RetTy);
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
                                                     ASTContext &Context) {
  unsigned Align = Context.getTypeAlign(Ty);
  if (Align < 128) return 0;
  if (const RecordType *RT = Ty->getAs<RecordType>())
    if (typeContainsSSEVector(RT->getDecl(), Context))
      return 16;
  return 0;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context,
                                           llvm::LLVMContext &VMContext) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
                                                                    Context));

    // Ignore empty structs.
    if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (Context.getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, Context))
      return ABIArgInfo::getExpand();

    return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
  } else {
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

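// On x86-32 every va_arg argument lives on the stack, so va_arg lowering is
// simple: load the current pointer from the va_list, advance it by the
// argument size rounded up to a 4 byte boundary, and return the old pointer
// cast to the argument type.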
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  const llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object.  Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, choose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
  /// the ways we might want to pass things, instead of constructing an LLVM
  /// type. This makes this code more explicit, and it makes it clearer that we
  /// are also doing this for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty,
                               ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo() : TargetCodeGenInfo(new X86_64ABIInfo()) {}
};

}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
                                          Class Field) const {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.
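  //
  // For example, for "struct { int x; float y; }" both fields share the low
  // eightbyte; the int classifies as INTEGER and the float as SSE, and rule
  // (d) merges them to INTEGER.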

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  else if (Field == Memory)
    return Memory;
  else if (Accum == NoClass)
    return Field;
  else if (Accum == Integer || Field == Integer)
    return Integer;
  else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
           Accum == X87 || Accum == X87Up)
    return Memory;
  else
    return SSE;
}

void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
  } else if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }

      // If this record has no fields but isn't empty, classify as INTEGER.
      if (RD->field_empty() && Size)
        Current = Integer;
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}

ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
                                          const llvm::Type *CoerceTo,
                                          ASTContext &Context) const {
  if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
    // Integer and pointer types will end up in a general purpose
    // register.
    if (Ty->isIntegralType() || Ty->hasPointerRepresentation())
      return (Ty->isPromotableIntegerType() ?
              ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
    // FIXME: It would probably be better to make CGFunctionInfo only map using
    // canonical types than to canonicalize here.
    QualType CTy = Context.getCanonicalType(Ty);

    // Float and double end up in a single SSE reg.
    if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
      return ABIArgInfo::getDirect();

  }

  return ABIArgInfo::getCoerce(CoerceTo);
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            ASTContext &Context) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!CodeGenFunction::hasAggregateLLVMType(Ty))
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  bool ByVal = !isRecordWithNonTrivialDestructorOrCopyConstructor(Ty);

  // FIXME: Set alignment correctly.
  return ABIArgInfo::getIndirect(0, ByVal);
}

ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectResult(RetTy, Context);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::getInt64Ty(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::getDoubleTy(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(VMContext,
                                    llvm::Type::getX86_FP80Ty(VMContext),
                                    llvm::Type::getX86_FP80Ty(VMContext),
                                    NULL);
    break;
  }

  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(VMContext, ResType,
                                      llvm::Type::getDoubleTy(VMContext), NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}

ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               llvm::LLVMContext &VMContext,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return getIndirectResult(Ty, Context);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::getInt64Ty(VMContext);
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::getDoubleTy(VMContext);
    break;
  }

  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    ++neededInt;
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;
  }

  return getCoerceResult(Ty, ResType, Context);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                                llvm::LLVMContext &VMContext) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          Context, VMContext);

  // Keep track of the number of assigned registers.
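  // The SysV x86-64 calling convention provides 6 general purpose registers
  // (%rdi, %rsi, %rdx, %rcx, %r8, %r9) and 8 SSE registers (%xmm0-%xmm7)
  // for argument passing.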
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, Context, VMContext,
                                    neededInt, neededSSE);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type, Context);
    }
  }
}

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                 llvm::Type::getInt64Ty(CGF.getLLVMContext()));
    llvm::Value *Mask = llvm::ConstantInt::get(
        llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
                             (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::LLVMContext &VMContext = CGF.getLLVMContext();
  const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
  const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);

  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the ABI document; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save space, so we check
  // against 176 below.

1267  llvm::Value *InRegs = 0;
1268  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
1269  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
1270  if (neededInt) {
1271    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
1272    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
1273    InRegs =
1274      CGF.Builder.CreateICmpULE(gp_offset,
1275                                llvm::ConstantInt::get(i32Ty,
1276                                                       48 - neededInt * 8),
1277                                "fits_in_gp");
1278  }
1279
1280  if (neededSSE) {
1281    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
1282    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
1283    llvm::Value *FitsInFP =
1284      CGF.Builder.CreateICmpULE(fp_offset,
1285                                llvm::ConstantInt::get(i32Ty,
1286                                                       176 - neededSSE * 16),
1287                                "fits_in_fp");
1288    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
1289  }
1290
1291  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
1292  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
1293  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
1294  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
1295
1296  // Emit code to load the value if it was passed in registers.
1297
1298  CGF.EmitBlock(InRegBlock);
1299
1300  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
1301  // an offset of l->gp_offset and/or l->fp_offset. This may require
1302  // copying to a temporary location in case the parameter is passed
1303  // in different register classes or requires an alignment greater
1304  // than 8 for general purpose registers and 16 for XMM registers.
1305  //
1306  // FIXME: This really results in shameful code when we end up needing to
1307  // collect arguments from different places; often what should result in a
1308  // simple assembling of a structure from scattered addresses has many more
1309  // loads than necessary. Can we clean this up?
1310  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
1311  llvm::Value *RegAddr =
1312    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
1313                           "reg_save_area");
1314  if (neededInt && neededSSE) {
1315    // FIXME: Cleanup.
1316    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
1317    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
1318    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
1319    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
1320    const llvm::Type *TyLo = ST->getElementType(0);
1321    const llvm::Type *TyHi = ST->getElementType(1);
1322    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
1323           "Unexpected ABI info for mixed regs");
1324    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
1325    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
1326    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
1327    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
1328    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
1329    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
1330    llvm::Value *V =
1331      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
1332    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
1333    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
1334    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
1335
1336    RegAddr = CGF.Builder.CreateBitCast(Tmp,
1337                                        llvm::PointerType::getUnqual(LTy));
1338  } else if (neededInt) {
1339    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
1340    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
1341                                        llvm::PointerType::getUnqual(LTy));
1342  } else {
1343    if (neededSSE == 1) {
1344      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
1345      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
1346                                          llvm::PointerType::getUnqual(LTy));
1347    } else {
1348      assert(neededSSE == 2 && "Invalid number of needed registers!");
1349      // SSE registers are spaced 16 bytes apart in the register save
1350      // area, we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(i32Ty, 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(DoubleTy);
      const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
                                                         DoubleTy, NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}
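
// Shape of the emitted lowering (illustrative summary, not verbatim IR):
// following AMD64-ABI 3.5.7, the in-register block indexes reg_save_area by
// gp_offset/fp_offset (copying through a temporary alloca whenever the two
// needed eightbytes are not adjacent in the save area), the in-memory block
// defers to EmitVAArgFromMemory, and the continuation block merges the two
// addresses in the 'vaarg.addr' phi.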

// PIC16 ABI Implementation

namespace {

class PIC16ABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PIC16TargetCodeGenInfo() : TargetCodeGenInfo(new PIC16ABIInfo()) {}
};

}

ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  return ABIArgInfo::getDirect();
}

llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
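  // FIXME: va_arg lowering is not implemented for PIC16; return a null
  // value as a placeholder.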
  return 0;
}

// ARM ABI Implementation

namespace {

class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(ARMABIInfo::ABIKind K)
    : TargetCodeGenInfo(new ARMABIInfo(K)) {}
};

}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                             llvm::LLVMContext &VMContext) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                          VMContext);
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  // ARM always overrides the calling convention.
  switch (getABIKind()) {
  case APCS:
    FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
    break;

  case AAPCS:
    FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
    break;

  case AAPCS_VFP:
    FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
    break;
  }
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (!CodeGenFunction::hasAggregateLLVMType(Ty))
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  // Ignore empty records.
  if (isEmptyRecord(Context, Ty, true))
    return ABIArgInfo::getIgnore();

  // FIXME: This is kind of nasty... but there isn't much choice because the ARM
  // backend doesn't support byval.
  // FIXME: This doesn't handle alignment > 64 bits.
  const llvm::Type* ElemTy;
  unsigned SizeRegs;
  if (Context.getTypeAlign(Ty) > 32) {
    ElemTy = llvm::Type::getInt64Ty(VMContext);
    SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
  } else {
    ElemTy = llvm::Type::getInt32Ty(VMContext);
    SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
  }
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
  const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
  return ABIArgInfo::getCoerce(STy);
}
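
// Illustrative example (author's reading, not a checked compiler run): a
// 12-byte, 4-byte-aligned argument such as
//   struct S { int a, b, c; };
// is coerced to a packed struct wrapping [3 x i32], letting the backend
// split it across core registers, while an 8-byte-aligned aggregate is
// wrapped in an array of i64 instead.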

static bool isIntegerLikeType(QualType Ty,
                              ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Complex types "should" be ok by the definition above, but they are not.
  if (Ty->isAnyComplexType())
    return false;

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Check if this field is at offset 0.
    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset != 0) {
      // Allow padding bit-fields, but only if they are all at the end of the
      // structure (despite the wording above, this matches gcc).
      if (FD->isBitField() &&
          !FD->getBitWidth()->EvaluateAsInt(Context).getZExtValue()) {
        for (; i != e; ++i)
          if (!i->isBitField() ||
              i->getBitWidth()->EvaluateAsInt(Context).getZExtValue())
            return false;

        // All remaining fields are padding, allow this.
        return true;
      }

      return false;
    }

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. Again this doesn't match the
    // wording above, but follows gcc.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
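
// Worked examples (author's reading of the rule above, not from the APCS
// text): 'struct { short s; }' and 'union { int i; char c; }' are
// integer-like, since every addressable field sits at offset 0, whereas
// 'struct { char a; char b; }' is not: 'b' is at a non-zero offset, and
// non-union structs are limited to a single field here anyway.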

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (!CodeGenFunction::hasAggregateLLVMType(RetTy))
    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(Context, RetTy, false))
      return ABIArgInfo::getIgnore();

    // Integer-like structures are returned in r0.
    if (isIntegerLikeType(RetTy, Context, VMContext)) {
      // Return in the smallest viable integer type.
      uint64_t Size = Context.getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
      if (Size <= 16)
        return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
      return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
    }

    // Otherwise return in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(Context, RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = Context.getTypeSize(RetTy);
  if (Size <= 32) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
    if (Size <= 16)
      return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
    return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
  }

  return ABIArgInfo::getIndirect(0);
}
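
// Illustrative contrast (author's summary, not from the ABI texts): under
// APCS,
//   struct R { short s; };   // integer-like -> coerced to i16, in r0
//   struct F { float f; };   // float field  -> not integer-like, indirect
// while under AAPCS both are small enough to come back in r0, coerced to an
// i16 and an i32 respectively by the size checks above.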

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  const llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
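
// Lowering sketch (assuming the simple char*-style va_list used here): each
// va_arg loads the current pointer, bitcasts it to T*, and stores back the
// pointer advanced by sizeof(T) rounded up to a 4-byte multiple, e.g. by 8
// for a double and by 4 for any char, short, or int argument.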

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}
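
// In short (author's summary): the default ABI returns every aggregate
// indirectly through an sret pointer, passes scalars directly, and adds
// sign/zero-extension for the C types that undergo integer promotion, so
// 'short f(void)' comes back as 'signext i16'.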

// SystemZ ABI Implementation

namespace {

class SystemZABIInfo : public ABIInfo {
  bool isPromotableIntegerType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType Ty, ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                            Context, VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo() : TargetCodeGenInfo(new SystemZABIInfo()) {}
};

}

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // The SystemZ ABI requires all 8-, 16- and 32-bit quantities to be extended.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}
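
// Example (author's illustration): for 'unsigned int f(unsigned short s);'
// both types are marked promotable above, so the declaration is emitted with
// explicit extension attributes, roughly 'zeroext i32 @f(i16 zeroext %s)';
// note that 32-bit 'unsigned int' is widened here even though C's integer
// promotions would leave it unchanged.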

llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}

ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return (isPromotableIntegerType(RetTy) ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return (isPromotableIntegerType(Ty) ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

// MSP430 ABI Implementation

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo() : TargetCodeGenInfo(new DefaultABIInfo()) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const;
};

}

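// Example (author's illustration): a handler declared as
//   void __attribute__((interrupt(2))) isr(void);
// receives the MSP430_INTR calling convention, is marked noinline, and gains
// an alias named "vector_ffe2" (0xffe0 + 2) pointing at the handler, which a
// linker script can place in the interrupt vector table.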
void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Mark the handler noinline.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit the ISR vector alias.
      unsigned Num = attr->getNumber() + 0xffe0;
      new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                            "vector_" +
                            llvm::LowercaseString(llvm::utohexstr(Num)),
                            GV, &M.getModule());
    }
  }
}
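// Dispatch example (author's note): an "armv6-apple-darwin9" triple whose
// target ABI string is "apcs-gnu" selects ARMTargetCodeGenInfo(APCS) below,
// while "i386-apple-darwin9" picks the X86-32 variant with Darwin's vector
// ABI and small-struct-in-register conventions enabled.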
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
  // free it.

  const llvm::Triple &Triple(getContext().Target.getTriple());
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo);

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    // FIXME: We want to know the float calling convention as well.
    if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
      return *(TheTargetCodeGenInfo =
               new ARMTargetCodeGenInfo(ARMABIInfo::APCS));

    return *(TheTargetCodeGenInfo =
             new ARMTargetCodeGenInfo(ARMABIInfo::AAPCS));

  case llvm::Triple::pic16:
    return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo());

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo());

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo());

  case llvm::Triple::x86:
    switch (Triple.getOS()) {
    case llvm::Triple::Darwin:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Context, true, true));
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::MinGW64:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Context, false, true));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Context, false, false));
    }

  case llvm::Triple::x86_64:
    return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo());
  }
}