1//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
12//
13//===----------------------------------------------------------------------===//
14
15#include "TargetInfo.h"
16#include "ABIInfo.h"
17#include "CGCXXABI.h"
18#include "CodeGenFunction.h"
19#include "clang/AST/RecordLayout.h"
20#include "clang/CodeGen/CGFunctionInfo.h"
21#include "clang/Frontend/CodeGenOptions.h"
22#include "llvm/ADT/Triple.h"
23#include "llvm/IR/DataLayout.h"
24#include "llvm/IR/Type.h"
25#include "llvm/Support/raw_ostream.h"
26
27#include <algorithm>    // std::sort
28
29using namespace clang;
30using namespace CodeGen;
31
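/// AssignToArrayRange - Store Value into each element of Array in the
/// inclusive range [FirstIndex, LastIndex]. This is used below when
/// populating the DWARF EH register size table.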
32static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
33                               llvm::Value *Array,
34                               llvm::Value *Value,
35                               unsigned FirstIndex,
36                               unsigned LastIndex) {
37  // Alternatively, we could emit this as a loop in the source.
38  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
39    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
40    Builder.CreateStore(Value, Cell);
41  }
42}
43
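/// isAggregateTypeForABI - Return true if the type is treated as an aggregate
/// for ABI purposes, i.e. it does not have a scalar evaluation kind. Member
/// function pointers are included as well, since they are typically lowered
/// as a pair of values rather than a single scalar.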
44static bool isAggregateTypeForABI(QualType T) {
45  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
46         T->isMemberFunctionPointerType();
47}
48
49ABIInfo::~ABIInfo() {}
50
51static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
52                                              CGCXXABI &CXXABI) {
53  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
54  if (!RD)
55    return CGCXXABI::RAA_Default;
56  return CXXABI.getRecordArgABI(RD);
57}
58
59static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
60                                              CGCXXABI &CXXABI) {
61  const RecordType *RT = T->getAs<RecordType>();
62  if (!RT)
63    return CGCXXABI::RAA_Default;
64  return getRecordArgABI(RT, CXXABI);
65}
66
67CGCXXABI &ABIInfo::getCXXABI() const {
68  return CGT.getCXXABI();
69}
70
71ASTContext &ABIInfo::getContext() const {
72  return CGT.getContext();
73}
74
75llvm::LLVMContext &ABIInfo::getVMContext() const {
76  return CGT.getLLVMContext();
77}
78
79const llvm::DataLayout &ABIInfo::getDataLayout() const {
80  return CGT.getDataLayout();
81}
82
83const TargetInfo &ABIInfo::getTarget() const {
84  return CGT.getTarget();
85}
86
87void ABIArgInfo::dump() const {
88  raw_ostream &OS = llvm::errs();
89  OS << "(ABIArgInfo Kind=";
90  switch (TheKind) {
91  case Direct:
92    OS << "Direct Type=";
93    if (llvm::Type *Ty = getCoerceToType())
94      Ty->print(OS);
95    else
96      OS << "null";
97    break;
98  case Extend:
99    OS << "Extend";
100    break;
101  case Ignore:
102    OS << "Ignore";
103    break;
104  case InAlloca:
105    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
106    break;
107  case Indirect:
108    OS << "Indirect Align=" << getIndirectAlign()
109       << " ByVal=" << getIndirectByVal()
110       << " Realign=" << getIndirectRealign();
111    break;
112  case Expand:
113    OS << "Expand";
114    break;
115  }
116  OS << ")\n";
117}
118
119TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
120
121// If someone can figure out a general rule for this, that would be great.
122// It's probably just doomed to be platform-dependent, though.
123unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
124  // Verified for:
125  //   x86-64     FreeBSD, Linux, Darwin
126  //   x86-32     FreeBSD, Linux, Darwin
127  //   PowerPC    Linux, Darwin
128  //   ARM        Darwin (*not* EABI)
129  //   AArch64    Linux
130  return 32;
131}
132
133bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
134                                     const FunctionNoProtoType *fnType) const {
135  // The following conventions are known to require this to be false:
136  //   x86_stdcall
137  //   MIPS
138  // For everything else, we just prefer false unless we opt out.
139  return false;
140}
141
142void
143TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
144                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" rather than a
  // filename like "librt.a" or "librt.so", and that they don't care whether
  // it's static or dynamic.
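  // For example, passing "rt" produces the option "-lrt".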
148  Opt = "-l";
149  Opt += Lib;
150}
151
152static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
153
/// isEmptyField - Return true iff the field is "empty", that is, it is an
/// unnamed bit-field or an (array of) empty record(s).
156static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
157                         bool AllowArrays) {
158  if (FD->isUnnamedBitfield())
159    return true;
160
161  QualType FT = FD->getType();
162
  // Constant arrays of empty records count as empty; strip them off.
  // Constant arrays of zero length always count as empty.
165  if (AllowArrays)
166    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
167      if (AT->getSize() == 0)
168        return true;
169      FT = AT->getElementType();
170    }
171
172  const RecordType *RT = FT->getAs<RecordType>();
173  if (!RT)
174    return false;
175
176  // C++ record fields are never empty, at least in the Itanium ABI.
177  //
178  // FIXME: We should use a predicate for whether this behavior is true in the
179  // current ABI.
180  if (isa<CXXRecordDecl>(RT->getDecl()))
181    return false;
182
183  return isEmptyRecord(Context, FT, AllowArrays);
184}
185
186/// isEmptyRecord - Return true iff a structure contains only empty
187/// fields. Note that a structure with a flexible array member is not
188/// considered empty.
189static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
190  const RecordType *RT = T->getAs<RecordType>();
191  if (!RT)
    return false;
193  const RecordDecl *RD = RT->getDecl();
194  if (RD->hasFlexibleArrayMember())
195    return false;
196
197  // If this is a C++ record, check the bases first.
198  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
199    for (const auto &I : CXXRD->bases())
200      if (!isEmptyRecord(Context, I.getType(), true))
201        return false;
202
203  for (const auto *I : RD->fields())
204    if (!isEmptyField(Context, I, AllowArrays))
205      return false;
206  return true;
207}
208
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists;
/// otherwise null.
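///
/// For example, 'struct { struct { double d; } x; }' yields the type 'double',
/// while 'struct { double d; int i; }' is not a single-element struct.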
217static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
218  const RecordType *RT = T->getAsStructureType();
219  if (!RT)
220    return nullptr;
221
222  const RecordDecl *RD = RT->getDecl();
223  if (RD->hasFlexibleArrayMember())
224    return nullptr;
225
226  const Type *Found = nullptr;
227
228  // If this is a C++ record, check the bases first.
229  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
230    for (const auto &I : CXXRD->bases()) {
231      // Ignore empty records.
232      if (isEmptyRecord(Context, I.getType(), true))
233        continue;
234
235      // If we already found an element then this isn't a single-element struct.
236      if (Found)
237        return nullptr;
238
239      // If this is non-empty and not a single element struct, the composite
240      // cannot be a single element struct.
241      Found = isSingleElementStruct(I.getType(), Context);
242      if (!Found)
243        return nullptr;
244    }
245  }
246
247  // Check for single element.
248  for (const auto *FD : RD->fields()) {
249    QualType FT = FD->getType();
250
251    // Ignore empty fields.
252    if (isEmptyField(Context, FD, true))
253      continue;
254
255    // If we already found an element then this isn't a single-element
256    // struct.
257    if (Found)
258      return nullptr;
259
260    // Treat single element arrays as the element.
261    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
262      if (AT->getSize().getZExtValue() != 1)
263        break;
264      FT = AT->getElementType();
265    }
266
267    if (!isAggregateTypeForABI(FT)) {
268      Found = FT.getTypePtr();
269    } else {
270      Found = isSingleElementStruct(FT, Context);
271      if (!Found)
272        return nullptr;
273    }
274  }
275
276  // We don't consider a struct a single-element struct if it has
277  // padding beyond the element type.
278  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
279    return nullptr;
280
281  return Found;
282}
283
284static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
285  // Treat complex types as the element type.
286  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
287    Ty = CTy->getElementType();
288
289  // Check for a type which we know has a simple scalar argument-passing
290  // convention without any padding.  (We're specifically looking for 32
291  // and 64-bit integer and integer-equivalents, float, and double.)
292  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
293      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
294    return false;
295
296  uint64_t Size = Context.getTypeSize(Ty);
297  return Size == 32 || Size == 64;
298}
299
/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it
/// were expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases; currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
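// For example, 'struct { int a; int b; }' can be expanded into two separate
// i32 arguments with the same stack layout, while 'struct { char c; }' cannot
// (char is not a 32- or 64-bit type).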
309static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
310  // We can only expand structure types.
311  const RecordType *RT = Ty->getAs<RecordType>();
312  if (!RT)
313    return false;
314
315  // We can only expand (C) structures.
316  //
317  // FIXME: This needs to be generalized to handle classes as well.
318  const RecordDecl *RD = RT->getDecl();
319  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
320    return false;
321
322  uint64_t Size = 0;
323
324  for (const auto *FD : RD->fields()) {
325    if (!is32Or64BitBasicType(FD->getType(), Context))
326      return false;
327
    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
331    if (FD->isBitField())
332      return false;
333
334    Size += Context.getTypeSize(FD->getType());
335  }
336
337  // Make sure there are not any holes in the struct.
338  if (Size != Context.getTypeSize(Ty))
339    return false;
340
341  return true;
342}
343
344namespace {
345/// DefaultABIInfo - The default implementation for ABI specific
346/// details. This implementation provides information which results in
347/// self-consistent and sensible LLVM IR generation, but does not
348/// conform to any particular ABI.
349class DefaultABIInfo : public ABIInfo {
350public:
351  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
352
353  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
355
356  void computeInfo(CGFunctionInfo &FI) const override {
357    if (!getCXXABI().classifyReturnType(FI))
358      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
359    for (auto &I : FI.arguments())
360      I.info = classifyArgumentType(I.type);
361  }
362
363  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
364                         CodeGenFunction &CGF) const override;
365};
366
367class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
368public:
369  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
370    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
371};
372
373llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
374                                       CodeGenFunction &CGF) const {
375  return nullptr;
376}
377
378ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
379  if (isAggregateTypeForABI(Ty))
380    return ABIArgInfo::getIndirect(0);
381
382  // Treat an enum type as its underlying type.
383  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
384    Ty = EnumTy->getDecl()->getIntegerType();
385
386  return (Ty->isPromotableIntegerType() ?
387          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
388}
389
390ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
391  if (RetTy->isVoidType())
392    return ABIArgInfo::getIgnore();
393
394  if (isAggregateTypeForABI(RetTy))
395    return ABIArgInfo::getIndirect(0);
396
397  // Treat an enum type as its underlying type.
398  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
399    RetTy = EnumTy->getDecl()->getIntegerType();
400
401  return (RetTy->isPromotableIntegerType() ?
402          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
403}
404
405//===----------------------------------------------------------------------===//
406// le32/PNaCl bitcode ABI Implementation
407//
408// This is a simplified version of the x86_32 ABI.  Arguments and return values
409// are always passed on the stack.
410//===----------------------------------------------------------------------===//
411
412class PNaClABIInfo : public ABIInfo {
413 public:
414  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
415
416  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
418
419  void computeInfo(CGFunctionInfo &FI) const override;
420  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
421                         CodeGenFunction &CGF) const override;
422};
423
424class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
425 public:
426  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
427    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
428};
429
430void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
431  if (!getCXXABI().classifyReturnType(FI))
432    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
433
434  for (auto &I : FI.arguments())
435    I.info = classifyArgumentType(I.type);
436}
437
438llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
439                                       CodeGenFunction &CGF) const {
440  return nullptr;
441}
442
443/// \brief Classify argument of given type \p Ty.
444ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
445  if (isAggregateTypeForABI(Ty)) {
446    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
447      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
448    return ABIArgInfo::getIndirect(0);
449  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
450    // Treat an enum type as its underlying type.
451    Ty = EnumTy->getDecl()->getIntegerType();
452  } else if (Ty->isFloatingType()) {
453    // Floating-point types don't go inreg.
454    return ABIArgInfo::getDirect();
455  }
456
457  return (Ty->isPromotableIntegerType() ?
458          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
459}
460
461ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
462  if (RetTy->isVoidType())
463    return ABIArgInfo::getIgnore();
464
465  // In the PNaCl ABI we always return records/structures on the stack.
466  if (isAggregateTypeForABI(RetTy))
467    return ABIArgInfo::getIndirect(0);
468
469  // Treat an enum type as its underlying type.
470  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
471    RetTy = EnumTy->getDecl()->getIntegerType();
472
473  return (RetTy->isPromotableIntegerType() ?
474          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
475}
476
477/// IsX86_MMXType - Return true if this is an MMX type.
478bool IsX86_MMXType(llvm::Type *IRType) {
479  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
480  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
481    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
482    IRType->getScalarSizeInBits() != 64;
483}
484
485static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
486                                          StringRef Constraint,
487                                          llvm::Type* Ty) {
488  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
489    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
490      // Invalid MMX constraint
491      return nullptr;
492    }
493
494    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
495  }
496
497  // No operation needed
498  return Ty;
499}
500
501//===----------------------------------------------------------------------===//
502// X86-32 ABI Implementation
503//===----------------------------------------------------------------------===//
504
505/// \brief Similar to llvm::CCState, but for Clang.
506struct CCState {
  CCState(unsigned CC)
      : CC(CC), FreeRegs(0), StackOffset(0), UseInAlloca(false) {}
508
509  unsigned CC;
510  unsigned FreeRegs;
511  unsigned StackOffset;
512  bool UseInAlloca;
513};
514
515/// X86_32ABIInfo - The X86-32 ABI information.
516class X86_32ABIInfo : public ABIInfo {
517  enum Class {
518    Integer,
519    Float
520  };
521
522  static const unsigned MinABIStackAlignInBytes = 4;
523
524  bool IsDarwinVectorABI;
525  bool IsSmallStructInRegABI;
526  bool IsWin32StructABI;
527  unsigned DefaultNumRegisterParameters;
528
529  static bool isRegisterSize(unsigned Size) {
530    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
531  }
532
533  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
534
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
537  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
538
539  ABIArgInfo getIndirectReturnResult(CCState &State) const;
540
541  /// \brief Return the alignment to use for the given type on the stack.
542  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
543
544  Class classify(QualType Ty) const;
545  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;
547  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;
548
549  /// \brief Rewrite the function info so that all memory arguments use
550  /// inalloca.
551  void rewriteWithInAlloca(CGFunctionInfo &FI) const;
552
553  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
554                           unsigned &StackOffset, ABIArgInfo &Info,
555                           QualType Type) const;
556
557public:
558
559  void computeInfo(CGFunctionInfo &FI) const override;
560  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
561                         CodeGenFunction &CGF) const override;
562
563  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
564                unsigned r)
565    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
566      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
567};
568
569class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
570public:
571  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
572      bool d, bool p, bool w, unsigned r)
573    :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}
574
575  static bool isStructReturnInRegABI(
576      const llvm::Triple &Triple, const CodeGenOptions &Opts);
577
578  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
579                           CodeGen::CodeGenModule &CGM) const override;
580
581  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
582    // Darwin uses different dwarf register numbers for EH.
583    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
584    return 4;
585  }
586
587  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
588                               llvm::Value *Address) const override;
589
590  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
591                                  StringRef Constraint,
592                                  llvm::Type* Ty) const override {
593    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
594  }
595
596  llvm::Constant *
597  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
598    unsigned Sig = (0xeb << 0) |  // jmp rel8
599                   (0x06 << 8) |  //           .+0x08
600                   ('F' << 16) |
601                   ('T' << 24);
602    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
603  }
604
605};
606
607}
608
/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
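///
/// For example, 'struct { short a, b; }' (32 bits) is returned in a register,
/// while a 12-byte struct is not, since it is not register sized.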
611bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
612                                               ASTContext &Context) const {
613  uint64_t Size = Context.getTypeSize(Ty);
614
615  // Type must be register sized.
616  if (!isRegisterSize(Size))
617    return false;
618
619  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
621    // registers.
622    if (Size == 64 || Size == 128)
623      return false;
624
625    return true;
626  }
627
628  // If this is a builtin, pointer, enum, complex type, member pointer, or
629  // member function pointer it is ok.
630  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
631      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
632      Ty->isBlockPointerType() || Ty->isMemberPointerType())
633    return true;
634
635  // Arrays are treated like records.
636  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
637    return shouldReturnTypeInRegister(AT->getElementType(), Context);
638
639  // Otherwise, it must be a record type.
640  const RecordType *RT = Ty->getAs<RecordType>();
641  if (!RT) return false;
642
643  // FIXME: Traverse bases here too.
644
  // Structure types are returned in a register if all fields would be
  // returned in a register.
647  for (const auto *FD : RT->getDecl()->fields()) {
648    // Empty fields are ignored.
649    if (isEmptyField(Context, FD, true))
650      continue;
651
652    // Check fields recursively.
653    if (!shouldReturnTypeInRegister(FD->getType(), Context))
654      return false;
655  }
656  return true;
657}
658
659ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden sret argument consumes
  // one integer register.
662  if (State.FreeRegs) {
663    --State.FreeRegs;
664    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
665  }
666  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
667}
668
669ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, CCState &State) const {
670  if (RetTy->isVoidType())
671    return ABIArgInfo::getIgnore();
672
673  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
674    // On Darwin, some vectors are returned in registers.
675    if (IsDarwinVectorABI) {
676      uint64_t Size = getContext().getTypeSize(RetTy);
677
678      // 128-bit vectors are a special case; they are returned in
679      // registers and we need to make sure to pick a type the LLVM
680      // backend will like.
681      if (Size == 128)
682        return ABIArgInfo::getDirect(llvm::VectorType::get(
683                  llvm::Type::getInt64Ty(getVMContext()), 2));
684
685      // Always return in register if it fits in a general purpose
686      // register, or if it is 64 bits and has a single element.
687      if ((Size == 8 || Size == 16 || Size == 32) ||
688          (Size == 64 && VT->getNumElements() == 1))
689        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
690                                                            Size));
691
692      return getIndirectReturnResult(State);
693    }
694
695    return ABIArgInfo::getDirect();
696  }
697
698  if (isAggregateTypeForABI(RetTy)) {
699    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
700      // Structures with flexible arrays are always indirect.
701      if (RT->getDecl()->hasFlexibleArrayMember())
702        return getIndirectReturnResult(State);
703    }
704
705    // If specified, structs and unions are always indirect.
706    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
707      return getIndirectReturnResult(State);
708
709    // Small structures which are register sized are generally returned
710    // in a register.
711    if (shouldReturnTypeInRegister(RetTy, getContext())) {
712      uint64_t Size = getContext().getTypeSize(RetTy);
713
714      // As a special-case, if the struct is a "single-element" struct, and
715      // the field is of type "float" or "double", return it in a
716      // floating-point register. (MSVC does not apply this special case.)
717      // We apply a similar transformation for pointer types to improve the
718      // quality of the generated IR.
719      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
720        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
721            || SeltTy->hasPointerRepresentation())
722          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
723
724      // FIXME: We should be able to narrow this integer in cases with dead
725      // padding.
726      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
727    }
728
729    return getIndirectReturnResult(State);
730  }
731
732  // Treat an enum type as its underlying type.
733  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
734    RetTy = EnumTy->getDecl()->getIntegerType();
735
736  return (RetTy->isPromotableIntegerType() ?
737          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
738}
739
740static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
741  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
742}
743
744static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
745  const RecordType *RT = Ty->getAs<RecordType>();
746  if (!RT)
    return false;
748  const RecordDecl *RD = RT->getDecl();
749
  // If this is a C++ record, check the bases first; if any base contains an
  // SSE vector type, so does the derived record.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (isRecordWithSSEVectorType(Context, I.getType()))
        return true;
755
756  for (const auto *i : RD->fields()) {
757    QualType FT = i->getType();
758
759    if (isSSEVectorType(Context, FT))
760      return true;
761
762    if (isRecordWithSSEVectorType(Context, FT))
763      return true;
764  }
765
766  return false;
767}
768
769unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
770                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.
775
776  // On non-Darwin, the stack type alignment is always 4.
777  if (!IsDarwinVectorABI) {
778    // Set explicit alignment, since we may need to realign the top.
779    return MinABIStackAlignInBytes;
780  }
781
782  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
783  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
784                      isRecordWithSSEVectorType(getContext(), Ty)))
785    return 16;
786
787  return MinABIStackAlignInBytes;
788}
789
790ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
791                                            CCState &State) const {
792  if (!ByVal) {
793    if (State.FreeRegs) {
794      --State.FreeRegs; // Non-byval indirects just use one pointer.
795      return ABIArgInfo::getIndirectInReg(0, false);
796    }
797    return ABIArgInfo::getIndirect(0, false);
798  }
799
800  // Compute the byval alignment.
801  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
802  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
803  if (StackAlign == 0)
804    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);
805
806  // If the stack alignment is less than the type alignment, realign the
807  // argument.
808  bool Realign = TypeAlign > StackAlign;
809  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
810}
811
812X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
813  const Type *T = isSingleElementStruct(Ty, getContext());
814  if (!T)
815    T = Ty.getTypePtr();
816
817  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
818    BuiltinType::Kind K = BT->getKind();
819    if (K == BuiltinType::Float || K == BuiltinType::Double)
820      return Float;
821  }
822  return Integer;
823}
824
825bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
826                                   bool &NeedsPadding) const {
827  NeedsPadding = false;
828  Class C = classify(Ty);
829  if (C == Float)
830    return false;
831
832  unsigned Size = getContext().getTypeSize(Ty);
833  unsigned SizeInRegs = (Size + 31) / 32;
834
835  if (SizeInRegs == 0)
836    return false;
837
838  if (SizeInRegs > State.FreeRegs) {
839    State.FreeRegs = 0;
840    return false;
841  }
842
843  State.FreeRegs -= SizeInRegs;
844
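  // Under fastcall, only 32-bit (or smaller) integers, pointers, and
  // references are actually passed in ECX/EDX; anything else still consumes
  // the registers counted above but is passed on the stack, possibly with a
  // padding register argument (see NeedsPadding).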
845  if (State.CC == llvm::CallingConv::X86_FastCall) {
846    if (Size > 32)
847      return false;
848
849    if (Ty->isIntegralOrEnumerationType())
850      return true;
851
852    if (Ty->isPointerType())
853      return true;
854
855    if (Ty->isReferenceType())
856      return true;
857
858    if (State.FreeRegs)
859      NeedsPadding = true;
860
861    return false;
862  }
863
864  return true;
865}
866
867ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
868                                               CCState &State) const {
869  // FIXME: Set alignment on indirect arguments.
870  if (isAggregateTypeForABI(Ty)) {
871    if (const RecordType *RT = Ty->getAs<RecordType>()) {
872      // Check with the C++ ABI first.
873      CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
874      if (RAA == CGCXXABI::RAA_Indirect) {
875        return getIndirectResult(Ty, false, State);
876      } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
877        // The field index doesn't matter, we'll fix it up later.
878        return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
879      }
880
881      // Structs are always byval on win32, regardless of what they contain.
882      if (IsWin32StructABI)
883        return getIndirectResult(Ty, true, State);
884
885      // Structures with flexible arrays are always indirect.
886      if (RT->getDecl()->hasFlexibleArrayMember())
887        return getIndirectResult(Ty, true, State);
888    }
889
890    // Ignore empty structs/unions.
891    if (isEmptyRecord(getContext(), Ty, true))
892      return ABIArgInfo::getIgnore();
893
894    llvm::LLVMContext &LLVMContext = getVMContext();
895    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
896    bool NeedsPadding;
897    if (shouldUseInReg(Ty, State, NeedsPadding)) {
898      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
899      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
900      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
901      return ABIArgInfo::getDirectInReg(Result);
902    }
903    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
904
905    // Expand small (<= 128-bit) record types when we know that the stack layout
906    // of those arguments will match the struct. This is important because the
907    // LLVM backend isn't smart enough to remove byval, which inhibits many
908    // optimizations.
909    if (getContext().getTypeSize(Ty) <= 4*32 &&
910        canExpandIndirectArgument(Ty, getContext()))
911      return ABIArgInfo::getExpandWithPadding(
912          State.CC == llvm::CallingConv::X86_FastCall, PaddingType);
913
914    return getIndirectResult(Ty, true, State);
915  }
916
917  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
920    if (IsDarwinVectorABI) {
921      uint64_t Size = getContext().getTypeSize(Ty);
922      if ((Size == 8 || Size == 16 || Size == 32) ||
923          (Size == 64 && VT->getNumElements() == 1))
924        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
925                                                            Size));
926    }
927
928    if (IsX86_MMXType(CGT.ConvertType(Ty)))
929      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
930
931    return ABIArgInfo::getDirect();
932  }
933
934
935  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
936    Ty = EnumTy->getDecl()->getIntegerType();
937
938  bool NeedsPadding;
939  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);
940
941  if (Ty->isPromotableIntegerType()) {
942    if (InReg)
943      return ABIArgInfo::getExtendInReg();
944    return ABIArgInfo::getExtend();
945  }
946  if (InReg)
947    return ABIArgInfo::getDirectInReg();
948  return ABIArgInfo::getDirect();
949}
950
951void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
952  CCState State(FI.getCallingConvention());
953  if (State.CC == llvm::CallingConv::X86_FastCall)
954    State.FreeRegs = 2;
955  else if (FI.getHasRegParm())
956    State.FreeRegs = FI.getRegParm();
957  else
958    State.FreeRegs = DefaultNumRegisterParameters;
959
960  if (!getCXXABI().classifyReturnType(FI)) {
961    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
962  } else if (FI.getReturnInfo().isIndirect()) {
963    // The C++ ABI is not aware of register usage, so we have to check if the
964    // return value was sret and put it in a register ourselves if appropriate.
965    if (State.FreeRegs) {
966      --State.FreeRegs;  // The sret parameter consumes a register.
967      FI.getReturnInfo().setInReg(true);
968    }
969  }
970
971  bool UsedInAlloca = false;
972  for (auto &I : FI.arguments()) {
973    I.info = classifyArgumentType(I.type, State);
974    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
975  }
976
977  // If we needed to use inalloca for any argument, do a second pass and rewrite
978  // all the memory arguments to use inalloca.
979  if (UsedInAlloca)
980    rewriteWithInAlloca(FI);
981}
982
983void
984X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
985                                   unsigned &StackOffset,
986                                   ABIArgInfo &Info, QualType Type) const {
987  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
988  Info = ABIArgInfo::getInAlloca(FrameFields.size());
989  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
990  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();
991
  // Insert padding bytes to respect alignment.  For x86_32, each argument is
  // 4-byte aligned.
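  // For example, appending a 6-byte argument at offset 0 leaves StackOffset
  // at 6, which is rounded up to 8 and padded with a [2 x i8] field.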
994  if (StackOffset % 4U) {
995    unsigned OldOffset = StackOffset;
996    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
997    unsigned NumBytes = StackOffset - OldOffset;
998    assert(NumBytes);
999    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1000    Ty = llvm::ArrayType::get(Ty, NumBytes);
1001    FrameFields.push_back(Ty);
1002  }
1003}
1004
1005void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1006  assert(IsWin32StructABI && "inalloca only supported on win32");
1007
1008  // Build a packed struct type for all of the arguments in memory.
1009  SmallVector<llvm::Type *, 6> FrameFields;
1010
1011  unsigned StackOffset = 0;
1012
1013  // Put the sret parameter into the inalloca struct if it's in memory.
1014  ABIArgInfo &Ret = FI.getReturnInfo();
1015  if (Ret.isIndirect() && !Ret.getInReg()) {
1016    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1017    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1018    // On Windows, the hidden sret parameter is always returned in eax.
1019    Ret.setInAllocaSRet(IsWin32StructABI);
1020  }
1021
1022  // Skip the 'this' parameter in ecx.
1023  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1024  if (FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall)
1025    ++I;
1026
1027  // Put arguments passed in memory into the struct.
1028  for (; I != E; ++I) {
1029
1030    // Leave ignored and inreg arguments alone.
1031    switch (I->info.getKind()) {
1032    case ABIArgInfo::Indirect:
1033      assert(I->info.getIndirectByVal());
1034      break;
1035    case ABIArgInfo::Ignore:
1036      continue;
1037    case ABIArgInfo::Direct:
1038    case ABIArgInfo::Extend:
1039      if (I->info.getInReg())
1040        continue;
1041      break;
1042    default:
1043      break;
1044    }
1045
1046    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1047  }
1048
1049  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1050                                        /*isPacked=*/true));
1051}
1052
1053llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1054                                      CodeGenFunction &CGF) const {
1055  llvm::Type *BPP = CGF.Int8PtrPtrTy;
1056
1057  CGBuilderTy &Builder = CGF.Builder;
1058  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
1059                                                       "ap");
1060  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
1061
1062  // Compute if the address needs to be aligned
1063  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
1064  Align = getTypeStackAlignInBytes(Ty, Align);
1065  Align = std::max(Align, 4U);
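  // Only emit the realignment code when the type requires more than the
  // default 4-byte slot alignment (e.g. an SSE vector type on Darwin, which
  // getTypeStackAlignInBytes reports as 16-byte aligned).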
1066  if (Align > 4) {
1067    // addr = (addr + align - 1) & -align;
1068    llvm::Value *Offset =
1069      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
1070    Addr = CGF.Builder.CreateGEP(Addr, Offset);
1071    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
1072                                                    CGF.Int32Ty);
1073    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
1074    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
1075                                      Addr->getType(),
1076                                      "ap.cur.aligned");
1077  }
1078
1079  llvm::Type *PTy =
1080    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
1081  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
1082
1083  uint64_t Offset =
1084    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
1085  llvm::Value *NextAddr =
1086    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
1087                      "ap.next");
1088  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
1089
1090  return AddrTyped;
1091}
1092
1093bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1094    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1095  assert(Triple.getArch() == llvm::Triple::x86);
1096
1097  switch (Opts.getStructReturnConvention()) {
1098  case CodeGenOptions::SRCK_Default:
1099    break;
1100  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
1101    return false;
1102  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
1103    return true;
1104  }
1105
1106  if (Triple.isOSDarwin())
1107    return true;
1108
1109  switch (Triple.getOS()) {
1110  case llvm::Triple::AuroraUX:
1111  case llvm::Triple::DragonFly:
1112  case llvm::Triple::FreeBSD:
1113  case llvm::Triple::OpenBSD:
1114  case llvm::Triple::Bitrig:
1115    return true;
1116  case llvm::Triple::Win32:
1117    switch (Triple.getEnvironment()) {
1118    case llvm::Triple::UnknownEnvironment:
1119    case llvm::Triple::Cygnus:
1120    case llvm::Triple::GNU:
1121    case llvm::Triple::MSVC:
1122      return true;
1123    default:
1124      return false;
1125    }
1126  default:
1127    return false;
1128  }
1129}
1130
1131void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
1132                                                  llvm::GlobalValue *GV,
1133                                            CodeGen::CodeGenModule &CGM) const {
1134  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
1135    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1136      // Get the LLVM function.
1137      llvm::Function *Fn = cast<llvm::Function>(GV);
1138
1139      // Now add the 'alignstack' attribute with a value of 16.
1140      llvm::AttrBuilder B;
1141      B.addStackAlignmentAttr(16);
1142      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1143                      llvm::AttributeSet::get(CGM.getLLVMContext(),
1144                                              llvm::AttributeSet::FunctionIndex,
1145                                              B));
1146    }
1147  }
1148}
1149
1150bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1151                                               CodeGen::CodeGenFunction &CGF,
1152                                               llvm::Value *Address) const {
1153  CodeGen::CGBuilderTy &Builder = CGF.Builder;
1154
1155  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1156
1157  // 0-7 are the eight integer registers;  the order is different
1158  //   on Darwin (for EH), but the range is the same.
1159  // 8 is %eip.
1160  AssignToArrayRange(Builder, Address, Four8, 0, 8);
1161
1162  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1163    // 12-16 are st(0..4).  Not sure why we stop at 4.
1164    // These have size 16, which is sizeof(long double) on
1165    // platforms with 8-byte alignment for that type.
1166    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1167    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1168
1169  } else {
1170    // 9 is %eflags, which doesn't get a size on Darwin for some
1171    // reason.
1172    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
1173
1174    // 11-16 are st(0..5).  Not sure why we stop at 5.
1175    // These have size 12, which is sizeof(long double) on
1176    // platforms with 4-byte alignment for that type.
1177    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1178    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1179  }
1180
1181  return false;
1182}
1183
1184//===----------------------------------------------------------------------===//
1185// X86-64 ABI Implementation
1186//===----------------------------------------------------------------------===//
1187
1188
1189namespace {
1190/// X86_64ABIInfo - The X86_64 ABI information.
1191class X86_64ABIInfo : public ABIInfo {
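  /// Class - The AMD64 SysV ABI argument classes (see AMD64-ABI 3.2.3). Each
  /// eightbyte of an argument or return value is assigned one of these
  /// classes, which determines whether it travels in a GPR, an SSE register,
  /// the x87 stack, or memory.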
1192  enum Class {
1193    Integer = 0,
1194    SSE,
1195    SSEUp,
1196    X87,
1197    X87Up,
1198    ComplexX87,
1199    NoClass,
1200    Memory
1201  };
1202
1203  /// merge - Implement the X86_64 ABI merging algorithm.
1204  ///
1205  /// Merge an accumulating classification \arg Accum with a field
1206  /// classification \arg Field.
1207  ///
1208  /// \param Accum - The accumulating classification. This should
1209  /// always be either NoClass or the result of a previous merge
1210  /// call. In addition, this should never be Memory (the caller
1211  /// should just return Memory for the aggregate).
1212  static Class merge(Class Accum, Class Field);
1213
1214  /// postMerge - Implement the X86_64 ABI post merging algorithm.
1215  ///
  /// Post merger cleanup; reduces a malformed Hi and Lo pair to
1217  /// final MEMORY or SSE classes when necessary.
1218  ///
1219  /// \param AggregateSize - The size of the current aggregate in
1220  /// the classification process.
1221  ///
1222  /// \param Lo - The classification for the parts of the type
1223  /// residing in the low word of the containing object.
1224  ///
1225  /// \param Hi - The classification for the parts of the type
1226  /// residing in the higher words of the containing object.
1227  ///
1228  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
1229
1230  /// classify - Determine the x86_64 register classes in which the
1231  /// given type T should be passed.
1232  ///
1233  /// \param Lo - The classification for the parts of the type
1234  /// residing in the low word of the containing object.
1235  ///
1236  /// \param Hi - The classification for the parts of the type
1237  /// residing in the high word of the containing object.
1238  ///
1239  /// \param OffsetBase - The bit offset of this type in the
  /// containing object.  Some parameters are classified differently
1241  /// depending on whether they straddle an eightbyte boundary.
1242  ///
1243  /// \param isNamedArg - Whether the argument in question is a "named"
1244  /// argument, as used in AMD64-ABI 3.5.7.
1245  ///
1246  /// If a word is unused its result will be NoClass; if a type should
1247  /// be passed in Memory then at least the classification of \arg Lo
1248  /// will be Memory.
1249  ///
1250  /// The \arg Lo class will be NoClass iff the argument is ignored.
1251  ///
1252  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
1253  /// also be ComplexX87.
1254  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1255                bool isNamedArg) const;
1256
1257  llvm::Type *GetByteVectorType(QualType Ty) const;
1258  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1259                                 unsigned IROffset, QualType SourceTy,
1260                                 unsigned SourceOffset) const;
1261  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1262                                     unsigned IROffset, QualType SourceTy,
1263                                     unsigned SourceOffset) const;
1264
  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
1267  ABIArgInfo getIndirectReturnResult(QualType Ty) const;
1268
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
1271  ///
1272  /// \param freeIntRegs - The number of free integer registers remaining
1273  /// available.
1274  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
1275
1276  ABIArgInfo classifyReturnType(QualType RetTy) const;
1277
1278  ABIArgInfo classifyArgumentType(QualType Ty,
1279                                  unsigned freeIntRegs,
1280                                  unsigned &neededInt,
1281                                  unsigned &neededSSE,
1282                                  bool isNamedArg) const;
1283
1284  bool IsIllegalVectorType(QualType Ty) const;
1285
1286  /// The 0.98 ABI revision clarified a lot of ambiguities,
1287  /// unfortunately in ways that were not always consistent with
1288  /// certain previous compilers.  In particular, platforms which
1289  /// required strict binary compatibility with older versions of GCC
1290  /// may need to exempt themselves.
1291  bool honorsRevision0_98() const {
1292    return !getTarget().getTriple().isOSDarwin();
1293  }
1294
1295  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
1297  // 64-bit hardware.
1298  bool Has64BitPointers;
1299
1300public:
1301  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
1302      ABIInfo(CGT), HasAVX(hasavx),
1303      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
1304  }
1305
1306  bool isPassedUsingAVXType(QualType type) const {
1307    unsigned neededInt, neededSSE;
1308    // The freeIntRegs argument doesn't matter here.
1309    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1310                                           /*isNamedArg*/true);
1311    if (info.isDirect()) {
1312      llvm::Type *ty = info.getCoerceToType();
1313      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1314        return (vectorTy->getBitWidth() > 128);
1315    }
1316    return false;
1317  }
1318
1319  void computeInfo(CGFunctionInfo &FI) const override;
1320
1321  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1322                         CodeGenFunction &CGF) const override;
1323};
1324
1325/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
1326class WinX86_64ABIInfo : public ABIInfo {
1327
1328  ABIArgInfo classify(QualType Ty, bool IsReturnType) const;
1329
1330public:
1331  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
1332
1333  void computeInfo(CGFunctionInfo &FI) const override;
1334
1335  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1336                         CodeGenFunction &CGF) const override;
1337};
1338
1339class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1340public:
1341  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
1342      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
1343
1344  const X86_64ABIInfo &getABIInfo() const {
1345    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
1346  }
1347
1348  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1349    return 7;
1350  }
1351
1352  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1353                               llvm::Value *Address) const override {
1354    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1355
1356    // 0-15 are the 16 integer registers.
1357    // 16 is %rip.
1358    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1359    return false;
1360  }
1361
1362  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1363                                  StringRef Constraint,
1364                                  llvm::Type* Ty) const override {
1365    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1366  }
1367
1368  bool isNoProtoCallVariadic(const CallArgList &args,
1369                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
1371    // registers used, and GCC sets this when calling an unprototyped
1372    // function, so we override the default behavior.  However, don't do
1373    // that when AVX types are involved: the ABI explicitly states it is
1374    // undefined, and it doesn't work in practice because of how the ABI
1375    // defines varargs anyway.
1376    if (fnType->getCallConv() == CC_C) {
1377      bool HasAVXType = false;
1378      for (CallArgList::const_iterator
1379             it = args.begin(), ie = args.end(); it != ie; ++it) {
1380        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
1381          HasAVXType = true;
1382          break;
1383        }
1384      }
1385
1386      if (!HasAVXType)
1387        return true;
1388    }
1389
1390    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
1391  }
1392
1393  llvm::Constant *
1394  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1395    unsigned Sig = (0xeb << 0) |  // jmp rel8
1396                   (0x0a << 8) |  //           .+0x0c
1397                   ('F' << 16) |
1398                   ('T' << 24);
1399    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1400  }
1401
1402};
1403
1404static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1405  // If the argument does not end in .lib, automatically add the suffix. This
1406  // matches the behavior of MSVC.
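  // For example, "msvcrt" becomes "msvcrt.lib", while "foo.lib" is left
  // unchanged.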
1407  std::string ArgStr = Lib;
1408  if (!Lib.endswith_lower(".lib"))
1409    ArgStr += ".lib";
1410  return ArgStr;
1411}
1412
1413class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
1414public:
1415  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1416        bool d, bool p, bool w, unsigned RegParms)
1417    : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
1418
1419  void getDependentLibraryOption(llvm::StringRef Lib,
1420                                 llvm::SmallString<24> &Opt) const override {
1421    Opt = "/DEFAULTLIB:";
1422    Opt += qualifyWindowsLibrary(Lib);
1423  }
1424
1425  void getDetectMismatchOption(llvm::StringRef Name,
1426                               llvm::StringRef Value,
1427                               llvm::SmallString<32> &Opt) const override {
1428    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1429  }
1430};
1431
1432class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1433public:
1434  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
1435    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
1436
1437  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1438    return 7;
1439  }
1440
1441  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1442                               llvm::Value *Address) const override {
1443    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1444
1445    // 0-15 are the 16 integer registers.
1446    // 16 is %rip.
1447    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1448    return false;
1449  }
1450
1451  void getDependentLibraryOption(llvm::StringRef Lib,
1452                                 llvm::SmallString<24> &Opt) const override {
1453    Opt = "/DEFAULTLIB:";
1454    Opt += qualifyWindowsLibrary(Lib);
1455  }
1456
1457  void getDetectMismatchOption(llvm::StringRef Name,
1458                               llvm::StringRef Value,
1459                               llvm::SmallString<32> &Opt) const override {
1460    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1461  }
1462};
1463
1464}
1465
1466void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
1467                              Class &Hi) const {
1468  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1469  //
1470  // (a) If one of the classes is Memory, the whole argument is passed in
1471  //     memory.
1472  //
1473  // (b) If X87UP is not preceded by X87, the whole argument is passed in
1474  //     memory.
1475  //
1476  // (c) If the size of the aggregate exceeds two eightbytes and the first
1477  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
1478  //     argument is passed in memory. NOTE: This is necessary to keep the
1479  //     ABI working for processors that don't support the __m256 type.
1480  //
1481  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1482  //
1483  // Some of these are enforced by the merging logic.  Others can arise
1484  // only with unions; for example:
  //   union { _Complex double cd; unsigned u; };
1486  //
1487  // Note that clauses (b) and (c) were added in 0.98.
1488  //
1489  if (Hi == Memory)
1490    Lo = Memory;
1491  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1492    Lo = Memory;
1493  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1494    Lo = Memory;
1495  if (Hi == SSEUp && Lo != SSE)
1496    Hi = SSE;
1497}
1498
1499X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
1500  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1501  // classified recursively so that always two fields are
1502  // considered. The resulting class is calculated according to
1503  // the classes of the fields in the eightbyte:
1504  //
1505  // (a) If both classes are equal, this is the resulting class.
1506  //
1507  // (b) If one of the classes is NO_CLASS, the resulting class is
1508  // the other class.
1509  //
1510  // (c) If one of the classes is MEMORY, the result is the MEMORY
1511  // class.
1512  //
1513  // (d) If one of the classes is INTEGER, the result is the
1514  // INTEGER.
1515  //
1516  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1517  // MEMORY is used as class.
1518  //
1519  // (f) Otherwise class SSE is used.
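  //
  // For example (illustrative only, mirroring the rules above):
  //   merge(Integer, SSE)  == Integer  (rule d)
  //   merge(SSE, X87Up)    == Memory   (rule e)
  //   merge(NoClass, SSE)  == SSE      (rule b)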
1520
1521  // Accum should never be memory (we should have returned) or
1522  // ComplexX87 (because this cannot be passed in a structure).
1523  assert((Accum != Memory && Accum != ComplexX87) &&
1524         "Invalid accumulated classification during merge.");
1525  if (Accum == Field || Field == NoClass)
1526    return Accum;
1527  if (Field == Memory)
1528    return Memory;
1529  if (Accum == NoClass)
1530    return Field;
1531  if (Accum == Integer || Field == Integer)
1532    return Integer;
1533  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1534      Accum == X87 || Accum == X87Up)
1535    return Memory;
1536  return SSE;
1537}
1538
1539void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
1540                             Class &Lo, Class &Hi, bool isNamedArg) const {
1541  // FIXME: This code can be simplified by introducing a simple value class for
1542  // Class pairs with appropriate constructor methods for the various
1543  // situations.
1544
1545  // FIXME: Some of the split computations are wrong; unaligned vectors
1546  // shouldn't be passed in registers for example, so there is no chance they
1547  // can straddle an eightbyte. Verify & simplify.
1548
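  // Two illustrative results of this routine (a sketch, not exhaustive):
  //   struct { double d; int i; }    -> Lo = SSE, Hi = Integer
  //   struct { long long a, b, c; }  -> Lo = Memory (it exceeds two
  //                                     eightbytes and no eightbyte is
  //                                     SSE/SSEUP, so it goes in memory).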
1549  Lo = Hi = NoClass;
1550
1551  Class &Current = OffsetBase < 64 ? Lo : Hi;
1552  Current = Memory;
1553
1554  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1555    BuiltinType::Kind k = BT->getKind();
1556
1557    if (k == BuiltinType::Void) {
1558      Current = NoClass;
1559    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1560      Lo = Integer;
1561      Hi = Integer;
1562    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1563      Current = Integer;
1564    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
1565               (k == BuiltinType::LongDouble &&
1566                getTarget().getTriple().isOSNaCl())) {
1567      Current = SSE;
1568    } else if (k == BuiltinType::LongDouble) {
1569      Lo = X87;
1570      Hi = X87Up;
1571    }
1572    // FIXME: _Decimal32 and _Decimal64 are SSE.
1573    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
1574    return;
1575  }
1576
1577  if (const EnumType *ET = Ty->getAs<EnumType>()) {
1578    // Classify the underlying integer type.
1579    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1580    return;
1581  }
1582
1583  if (Ty->hasPointerRepresentation()) {
1584    Current = Integer;
1585    return;
1586  }
1587
1588  if (Ty->isMemberPointerType()) {
1589    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
1590      Lo = Hi = Integer;
1591    else
1592      Current = Integer;
1593    return;
1594  }
1595
1596  if (const VectorType *VT = Ty->getAs<VectorType>()) {
1597    uint64_t Size = getContext().getTypeSize(VT);
1598    if (Size == 32) {
1599      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1600      // float> as integer.
1601      Current = Integer;
1602
1603      // If this type crosses an eightbyte boundary, it should be
1604      // split.
1605      uint64_t EB_Real = (OffsetBase) / 64;
1606      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1607      if (EB_Real != EB_Imag)
1608        Hi = Lo;
1609    } else if (Size == 64) {
1610      // gcc passes <1 x double> in memory. :(
1611      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1612        return;
1613
1614      // gcc passes <1 x long long> as INTEGER.
1615      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
1616          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1617          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1618          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
1619        Current = Integer;
1620      else
1621        Current = SSE;
1622
1623      // If this type crosses an eightbyte boundary, it should be
1624      // split.
1625      if (OffsetBase && OffsetBase != 64)
1626        Hi = Lo;
1627    } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
      // Arguments of 256 bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design assumes that types can't be
      // greater than 128 bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bit types, but since there are no
      // cases where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to cover bits 64-255.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
1639      Lo = SSE;
1640      Hi = SSEUp;
1641    }
1642    return;
1643  }
1644
1645  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1646    QualType ET = getContext().getCanonicalType(CT->getElementType());
1647
1648    uint64_t Size = getContext().getTypeSize(Ty);
1649    if (ET->isIntegralOrEnumerationType()) {
1650      if (Size <= 64)
1651        Current = Integer;
1652      else if (Size <= 128)
1653        Lo = Hi = Integer;
1654    } else if (ET == getContext().FloatTy)
1655      Current = SSE;
1656    else if (ET == getContext().DoubleTy ||
1657             (ET == getContext().LongDoubleTy &&
1658              getTarget().getTriple().isOSNaCl()))
1659      Lo = Hi = SSE;
1660    else if (ET == getContext().LongDoubleTy)
1661      Current = ComplexX87;
1662
1663    // If this complex type crosses an eightbyte boundary then it
1664    // should be split.
1665    uint64_t EB_Real = (OffsetBase) / 64;
1666    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1667    if (Hi == NoClass && EB_Real != EB_Imag)
1668      Hi = Lo;
1669
1670    return;
1671  }
1672
1673  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1674    // Arrays are treated like structures.
1675
1676    uint64_t Size = getContext().getTypeSize(Ty);
1677
1678    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1679    // than four eightbytes, ..., it has class MEMORY.
1680    if (Size > 256)
1681      return;
1682
1683    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1684    // fields, it has class MEMORY.
1685    //
1686    // Only need to check alignment of array base.
1687    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1688      return;
1689
1690    // Otherwise implement simplified merge. We could be smarter about
1691    // this, but it isn't worth it and would be harder to verify.
1692    Current = NoClass;
1693    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1694    uint64_t ArraySize = AT->getSize().getZExtValue();
1695
    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since the Lo and Hi logic isn't
    // extended to work for sizes wider than 128 bits, check early and fall
    // back to memory.
1699    if (Size > 128 && EltSize != 256)
1700      return;
1701
1702    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1703      Class FieldLo, FieldHi;
1704      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
1705      Lo = merge(Lo, FieldLo);
1706      Hi = merge(Hi, FieldHi);
1707      if (Lo == Memory || Hi == Memory)
1708        break;
1709    }
1710
1711    postMerge(Size, Lo, Hi);
1712    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1713    return;
1714  }
1715
1716  if (const RecordType *RT = Ty->getAs<RecordType>()) {
1717    uint64_t Size = getContext().getTypeSize(Ty);
1718
1719    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1720    // than four eightbytes, ..., it has class MEMORY.
1721    if (Size > 256)
1722      return;
1723
1724    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1725    // copy constructor or a non-trivial destructor, it is passed by invisible
1726    // reference.
1727    if (getRecordArgABI(RT, getCXXABI()))
1728      return;
1729
1730    const RecordDecl *RD = RT->getDecl();
1731
1732    // Assume variable sized types are passed in memory.
1733    if (RD->hasFlexibleArrayMember())
1734      return;
1735
1736    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1737
1738    // Reset Lo class, this will be recomputed.
1739    Current = NoClass;
1740
1741    // If this is a C++ record, classify the bases first.
1742    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1743      for (const auto &I : CXXRD->bases()) {
1744        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
1745               "Unexpected base class!");
1746        const CXXRecordDecl *Base =
1747          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
1748
1749        // Classify this field.
1750        //
1751        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1752        // single eightbyte, each is classified separately. Each eightbyte gets
1753        // initialized to class NO_CLASS.
1754        Class FieldLo, FieldHi;
1755        uint64_t Offset =
1756          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
1757        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
1758        Lo = merge(Lo, FieldLo);
1759        Hi = merge(Hi, FieldHi);
1760        if (Lo == Memory || Hi == Memory)
1761          break;
1762      }
1763    }
1764
1765    // Classify the fields one at a time, merging the results.
1766    unsigned idx = 0;
1767    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1768           i != e; ++i, ++idx) {
1769      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1770      bool BitField = i->isBitField();
1771
1772      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1773      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
1774      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since the Lo and Hi logic isn't
      // extended to work for sizes wider than 128 bits, check early and fall
      // back to memory.
1778      //
1779      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
1780        Lo = Memory;
1781        return;
1782      }
1783      // Note, skip this test for bit-fields, see below.
1784      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
1785        Lo = Memory;
1786        return;
1787      }
1788
1789      // Classify this field.
1790      //
1791      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1792      // exceeds a single eightbyte, each is classified
1793      // separately. Each eightbyte gets initialized to class
1794      // NO_CLASS.
1795      Class FieldLo, FieldHi;
1796
1797      // Bit-fields require special handling, they do not force the
1798      // structure to be passed in memory even if unaligned, and
1799      // therefore they can straddle an eightbyte.
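      //
      // For illustration: an 'int b : 20' bit-field placed at bit offset 56
      // spans both eightbytes (EB_Lo == 0, EB_Hi == 1), so both FieldLo and
      // FieldHi get classified as Integer below.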
1800      if (BitField) {
1801        // Ignore padding bit-fields.
1802        if (i->isUnnamedBitfield())
1803          continue;
1804
1805        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1806        uint64_t Size = i->getBitWidthValue(getContext());
1807
1808        uint64_t EB_Lo = Offset / 64;
1809        uint64_t EB_Hi = (Offset + Size - 1) / 64;
1810
1811        if (EB_Lo) {
1812          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1813          FieldLo = NoClass;
1814          FieldHi = Integer;
1815        } else {
1816          FieldLo = Integer;
1817          FieldHi = EB_Hi ? Integer : NoClass;
1818        }
1819      } else
1820        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
1821      Lo = merge(Lo, FieldLo);
1822      Hi = merge(Hi, FieldHi);
1823      if (Lo == Memory || Hi == Memory)
1824        break;
1825    }
1826
1827    postMerge(Size, Lo, Hi);
1828  }
1829}
1830
1831ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
1832  // If this is a scalar LLVM value then assume LLVM will pass it in the right
1833  // place naturally.
1834  if (!isAggregateTypeForABI(Ty)) {
1835    // Treat an enum type as its underlying type.
1836    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1837      Ty = EnumTy->getDecl()->getIntegerType();
1838
1839    return (Ty->isPromotableIntegerType() ?
1840            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1841  }
1842
1843  return ABIArgInfo::getIndirect(0);
1844}
1845
1846bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
1847  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
1848    uint64_t Size = getContext().getTypeSize(VecTy);
1849    unsigned LargestVector = HasAVX ? 256 : 128;
1850    if (Size <= 64 || Size > LargestVector)
1851      return true;
1852  }
1853
1854  return false;
1855}
1856
1857ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
1858                                            unsigned freeIntRegs) const {
1859  // If this is a scalar LLVM value then assume LLVM will pass it in the right
1860  // place naturally.
1861  //
1862  // This assumption is optimistic, as there could be free registers available
1863  // when we need to pass this argument in memory, and LLVM could try to pass
1864  // the argument in the free register. This does not seem to happen currently,
1865  // but this code would be much safer if we could mark the argument with
1866  // 'onstack'. See PR12193.
1867  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
1868    // Treat an enum type as its underlying type.
1869    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1870      Ty = EnumTy->getDecl()->getIntegerType();
1871
1872    return (Ty->isPromotableIntegerType() ?
1873            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1874  }
1875
1876  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
1877    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
1878
1879  // Compute the byval alignment. We specify the alignment of the byval in all
1880  // cases so that the mid-level optimizer knows the alignment of the byval.
1881  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
1882
1883  // Attempt to avoid passing indirect results using byval when possible. This
1884  // is important for good codegen.
1885  //
1886  // We do this by coercing the value into a scalar type which the backend can
1887  // handle naturally (i.e., without using byval).
1888  //
1889  // For simplicity, we currently only do this when we have exhausted all of the
1890  // free integer registers. Doing this when there are free integer registers
1891  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
1893  // arguments to the function (so that any subsequent inreg values came first),
1894  // or only doing this optimization when there were no following arguments that
1895  // might be inreg.
1896  //
1897  // We currently expect it to be rare (particularly in well written code) for
1898  // arguments to be passed on the stack when there are still free integer
1899  // registers available (this would typically imply large structs being passed
1900  // by value), so this seems like a fair tradeoff for now.
1901  //
1902  // We can revisit this if the backend grows support for 'onstack' parameter
1903  // attributes. See PR12193.
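  //
  // Illustrative sketch: with freeIntRegs == 0, a type such as
  //   struct { char c[8]; }
  // (size 64 bits, byval alignment 8) is coerced to an i64 below and ends up
  // on the stack instead of being passed byval.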
1904  if (freeIntRegs == 0) {
1905    uint64_t Size = getContext().getTypeSize(Ty);
1906
1907    // If this type fits in an eightbyte, coerce it into the matching integral
1908    // type, which will end up on the stack (with alignment 8).
1909    if (Align == 8 && Size <= 64)
1910      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1911                                                          Size));
1912  }
1913
1914  return ABIArgInfo::getIndirect(Align);
1915}
1916
/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register.  Pick an LLVM IR type that will be passed as
/// a vector register.
1920llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1921  llvm::Type *IRType = CGT.ConvertType(Ty);
1922
1923  // Wrapper structs that just contain vectors are passed just like vectors,
1924  // strip them off if present.
1925  llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1926  while (STy && STy->getNumElements() == 1) {
1927    IRType = STy->getElementType(0);
1928    STy = dyn_cast<llvm::StructType>(IRType);
1929  }
1930
  // If the preferred type is a 16- or 32-byte vector, prefer to pass it.
1932  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1933    llvm::Type *EltTy = VT->getElementType();
1934    unsigned BitWidth = VT->getBitWidth();
1935    if ((BitWidth >= 128 && BitWidth <= 256) &&
1936        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1937         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1938         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1939         EltTy->isIntegerTy(128)))
1940      return VT;
1941  }
1942
1943  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1944}
1945
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or be in alignment
/// padding.  The user type specified is known to be at most 128 bits in size,
/// and to have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
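///
/// For example (an illustrative sketch): for 'struct { float a, b, c; }'
/// (96 bits), a query of the range [96, 128) lies entirely off the end of the
/// type and returns true; this is what lets the second eightbyte of such a
/// struct be passed as a single float rather than a double.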
1953static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1954                                  unsigned EndBit, ASTContext &Context) {
1955  // If the bytes being queried are off the end of the type, there is no user
1956  // data hiding here.  This handles analysis of builtins, vectors and other
1957  // types that don't contain interesting padding.
1958  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1959  if (TySize <= StartBit)
1960    return true;
1961
1962  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1963    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1964    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1965
1966    // Check each element to see if the element overlaps with the queried range.
1967    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
1969      unsigned EltOffset = i*EltSize;
1970      if (EltOffset >= EndBit) break;
1971
1972      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
1973      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1974                                 EndBit-EltOffset, Context))
1975        return false;
1976    }
1977    // If it overlaps no elements, then it is safe to process as padding.
1978    return true;
1979  }
1980
1981  if (const RecordType *RT = Ty->getAs<RecordType>()) {
1982    const RecordDecl *RD = RT->getDecl();
1983    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1984
1985    // If this is a C++ record, check the bases first.
1986    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1987      for (const auto &I : CXXRD->bases()) {
1988        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
1989               "Unexpected base class!");
1990        const CXXRecordDecl *Base =
1991          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
1992
1993        // If the base is after the span we care about, ignore it.
1994        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
1995        if (BaseOffset >= EndBit) continue;
1996
1997        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
1998        if (!BitsContainNoUserData(I.getType(), BaseStart,
1999                                   EndBit-BaseOffset, Context))
2000          return false;
2001      }
2002    }
2003
2004    // Verify that no field has data that overlaps the region of interest.  Yes
2005    // this could be sped up a lot by being smarter about queried fields,
2006    // however we're only looking at structs up to 16 bytes, so we don't care
2007    // much.
2008    unsigned idx = 0;
2009    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2010         i != e; ++i, ++idx) {
2011      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2012
2013      // If we found a field after the region we care about, then we're done.
2014      if (FieldOffset >= EndBit) break;
2015
2016      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2017      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
2018                                 Context))
2019        return false;
2020    }
2021
2022    // If nothing in this record overlapped the area of interest, then we're
2023    // clean.
2024    return true;
2025  }
2026
2027  return false;
2028}
2029
2030/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2031/// float member at the specified offset.  For example, {int,{float}} has a
2032/// float at offset 4.  It is conservatively correct for this routine to return
2033/// false.
2034static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2035                                  const llvm::DataLayout &TD) {
2036  // Base case if we find a float.
2037  if (IROffset == 0 && IRType->isFloatTy())
2038    return true;
2039
2040  // If this is a struct, recurse into the field at the specified offset.
2041  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2042    const llvm::StructLayout *SL = TD.getStructLayout(STy);
2043    unsigned Elt = SL->getElementContainingOffset(IROffset);
2044    IROffset -= SL->getElementOffset(Elt);
2045    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2046  }
2047
2048  // If this is an array, recurse into the field at the specified offset.
2049  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2050    llvm::Type *EltTy = ATy->getElementType();
2051    unsigned EltSize = TD.getTypeAllocSize(EltTy);
2052    IROffset -= IROffset/EltSize*EltSize;
2053    return ContainsFloatAtOffset(EltTy, IROffset, TD);
2054  }
2055
2056  return false;
2057}
2058
2059
2060/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2061/// low 8 bytes of an XMM register, corresponding to the SSE class.
2062llvm::Type *X86_64ABIInfo::
2063GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2064                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are double, <2 x float>, or float. We
  // pass as float if the last 4 bytes are just padding.  This happens for
  // structs that contain 3 floats.
2068  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2069                            SourceOffset*8+64, getContext()))
2070    return llvm::Type::getFloatTy(getVMContext());
2071
2072  // We want to pass as <2 x float> if the LLVM IR type contains a float at
2073  // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
2074  // case.
2075  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2076      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
2077    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
2078
2079  return llvm::Type::getDoubleTy(getVMContext());
2080}
2081
2082
2083/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2084/// an 8-byte GPR.  This means that we either have a scalar or we are talking
2085/// about the high or low part of an up-to-16-byte struct.  This routine picks
/// the best LLVM IR type to represent this, which may be i64 or anything else
/// that the backend will pass in a GPR and that works better (e.g. i8, %foo*,
/// etc.).
2089///
2090/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
2091/// the source type.  IROffset is an offset in bytes into the LLVM IR type that
2092/// the 8-byte value references.  PrefType may be null.
2093///
2094/// SourceTy is the source-level type for the entire argument.  SourceOffset is
2095/// an offset into this that we're processing (which is always either 0 or 8).
2096///
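/// For example (an illustrative sketch): for 'struct { char c; int i; }' this
/// returns i64 for its single eightbyte (the leading i8 alone cannot be used,
/// since the int still carries user data), while for 'struct { double d; int
/// i; }' the eightbyte at offset 8 is simply returned as i32 because the rest
/// of that eightbyte is tail padding.
///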
2097llvm::Type *X86_64ABIInfo::
2098GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2099                       QualType SourceTy, unsigned SourceOffset) const {
2100  // If we're dealing with an un-offset LLVM IR type, then it means that we're
2101  // returning an 8-byte unit starting with it.  See if we can safely use it.
2102  if (IROffset == 0) {
2103    // Pointers and int64's always fill the 8-byte unit.
2104    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2105        IRType->isIntegerTy(64))
2106      return IRType;
2107
2108    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2109    // goodness in the source type is just tail padding.  This is allowed to
2110    // kick in for struct {double,int} on the int, but not on
2111    // struct{double,int,int} because we wouldn't return the second int.  We
2112    // have to do this analysis on the source type because we can't depend on
2113    // unions being lowered a specific way etc.
2114    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2115        IRType->isIntegerTy(32) ||
2116        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2117      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2118          cast<llvm::IntegerType>(IRType)->getBitWidth();
2119
2120      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2121                                SourceOffset*8+64, getContext()))
2122        return IRType;
2123    }
2124  }
2125
2126  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2127    // If this is a struct, recurse into the field at the specified offset.
2128    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2129    if (IROffset < SL->getSizeInBytes()) {
2130      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2131      IROffset -= SL->getElementOffset(FieldIdx);
2132
2133      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2134                                    SourceTy, SourceOffset);
2135    }
2136  }
2137
2138  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2139    llvm::Type *EltTy = ATy->getElementType();
2140    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2141    unsigned EltOffset = IROffset/EltSize*EltSize;
2142    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2143                                  SourceOffset);
2144  }
2145
2146  // Okay, we don't have any better idea of what to pass, so we pass this in an
2147  // integer register that isn't too big to fit the rest of the struct.
2148  unsigned TySizeInBytes =
2149    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2150
2151  assert(TySizeInBytes != SourceOffset && "Empty field?");
2152
2153  // It is always safe to classify this as an integer type up to i64 that
2154  // isn't larger than the structure.
2155  return llvm::IntegerType::get(getVMContext(),
2156                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
2157}
2158
2159
2160/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2161/// be used as elements of a two register pair to pass or return, return a
2162/// first class aggregate to represent them.  For example, if the low part of
2163/// a by-value argument should be passed as i32* and the high part as float,
2164/// return {i32*, float}.
2165static llvm::Type *
2166GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2167                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
2169  // at offset 8.  If the high and low parts we inferred are both 4-byte types
2170  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2171  // the second element at offset 8.  Check for this:
2172  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2173  unsigned HiAlign = TD.getABITypeAlignment(Hi);
2174  unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
2175  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2176
2177  // To handle this, we have to increase the size of the low part so that the
2178  // second element will start at an 8 byte offset.  We can't increase the size
2179  // of the second element because it might make us access off the end of the
2180  // struct.
2181  if (HiStart != 8) {
2182    // There are only two sorts of types the ABI generation code can produce for
2183    // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2184    // Promote these to a larger type.
2185    if (Lo->isFloatTy())
2186      Lo = llvm::Type::getDoubleTy(Lo->getContext());
2187    else {
2188      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2189      Lo = llvm::Type::getInt64Ty(Lo->getContext());
2190    }
2191  }
2192
2193  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
2194
2195
2196  // Verify that the second element is at an 8-byte offset.
2197  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2198         "Invalid x86-64 argument pair!");
2199  return Result;
2200}
2201
2202ABIArgInfo X86_64ABIInfo::
2203classifyReturnType(QualType RetTy) const {
2204  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2205  // classification algorithm.
2206  X86_64ABIInfo::Class Lo, Hi;
2207  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2208
2209  // Check some invariants.
2210  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2211  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2212
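  // Illustrative sketch of a common case: '_Complex double' classifies as
  // (Lo = SSE, Hi = SSE), and the code below turns that into a direct return
  // of the IR aggregate {double, double} in %xmm0/%xmm1.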
2213  llvm::Type *ResType = nullptr;
2214  switch (Lo) {
2215  case NoClass:
2216    if (Hi == NoClass)
2217      return ABIArgInfo::getIgnore();
2218    // If the low part is just padding, it takes no register, leave ResType
2219    // null.
2220    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2221           "Unknown missing lo part");
2222    break;
2223
2224  case SSEUp:
2225  case X87Up:
2226    llvm_unreachable("Invalid classification for lo word.");
2227
2228    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2229    // hidden argument.
2230  case Memory:
2231    return getIndirectReturnResult(RetTy);
2232
2233    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2234    // available register of the sequence %rax, %rdx is used.
2235  case Integer:
2236    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2237
2238    // If we have a sign or zero extended integer, make sure to return Extend
2239    // so that the parameter gets the right LLVM IR attributes.
2240    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2241      // Treat an enum type as its underlying type.
2242      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2243        RetTy = EnumTy->getDecl()->getIntegerType();
2244
2245      if (RetTy->isIntegralOrEnumerationType() &&
2246          RetTy->isPromotableIntegerType())
2247        return ABIArgInfo::getExtend();
2248    }
2249    break;
2250
2251    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2252    // available SSE register of the sequence %xmm0, %xmm1 is used.
2253  case SSE:
2254    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2255    break;
2256
2257    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2258    // returned on the X87 stack in %st0 as 80-bit x87 number.
2259  case X87:
2260    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2261    break;
2262
2263    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2264    // part of the value is returned in %st0 and the imaginary part in
2265    // %st1.
2266  case ComplexX87:
2267    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2268    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2269                                    llvm::Type::getX86_FP80Ty(getVMContext()),
2270                                    NULL);
2271    break;
2272  }
2273
2274  llvm::Type *HighPart = nullptr;
2275  switch (Hi) {
2276    // Memory was handled previously and X87 should
2277    // never occur as a hi class.
2278  case Memory:
2279  case X87:
2280    llvm_unreachable("Invalid classification for hi word.");
2281
2282  case ComplexX87: // Previously handled.
2283  case NoClass:
2284    break;
2285
2286  case Integer:
2287    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2288    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2289      return ABIArgInfo::getDirect(HighPart, 8);
2290    break;
2291  case SSE:
2292    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2293    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2294      return ABIArgInfo::getDirect(HighPart, 8);
2295    break;
2296
    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
    // vector register.
2300    //
2301    // SSEUP should always be preceded by SSE, just widen.
2302  case SSEUp:
2303    assert(Lo == SSE && "Unexpected SSEUp classification.");
2304    ResType = GetByteVectorType(RetTy);
2305    break;
2306
2307    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2308    // returned together with the previous X87 value in %st0.
2309  case X87Up:
2310    // If X87Up is preceded by X87, we don't need to do
2311    // anything. However, in some cases with unions it may not be
2312    // preceded by X87. In such situations we follow gcc and pass the
2313    // extra bits in an SSE reg.
2314    if (Lo != X87) {
2315      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2316      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2317        return ABIArgInfo::getDirect(HighPart, 8);
2318    }
2319    break;
2320  }
2321
2322  // If a high part was specified, merge it together with the low part.  It is
2323  // known to pass in the high eightbyte of the result.  We do this by forming a
2324  // first class struct aggregate with the high and low part: {low, high}
2325  if (HighPart)
2326    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2327
2328  return ABIArgInfo::getDirect(ResType);
2329}
2330
2331ABIArgInfo X86_64ABIInfo::classifyArgumentType(
2332  QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
2333  bool isNamedArg)
2334  const
2335{
2336  X86_64ABIInfo::Class Lo, Hi;
2337  classify(Ty, 0, Lo, Hi, isNamedArg);
2338
2339  // Check some invariants.
2340  // FIXME: Enforce these by construction.
2341  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2342  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2343
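  // Illustrative sketch of a common case: 'struct { long long l; double d; }'
  // classifies as (Lo = Integer, Hi = SSE) and is lowered below to a direct
  // {i64, double}, consuming one GPR and one SSE register.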
2344  neededInt = 0;
2345  neededSSE = 0;
2346  llvm::Type *ResType = nullptr;
2347  switch (Lo) {
2348  case NoClass:
2349    if (Hi == NoClass)
2350      return ABIArgInfo::getIgnore();
2351    // If the low part is just padding, it takes no register, leave ResType
2352    // null.
2353    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2354           "Unknown missing lo part");
2355    break;
2356
2357    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2358    // on the stack.
2359  case Memory:
2360
2361    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2362    // COMPLEX_X87, it is passed in memory.
2363  case X87:
2364  case ComplexX87:
2365    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
2366      ++neededInt;
2367    return getIndirectResult(Ty, freeIntRegs);
2368
2369  case SSEUp:
2370  case X87Up:
2371    llvm_unreachable("Invalid classification for lo word.");
2372
2373    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2374    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2375    // and %r9 is used.
2376  case Integer:
2377    ++neededInt;
2378
2379    // Pick an 8-byte type based on the preferred type.
2380    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2381
2382    // If we have a sign or zero extended integer, make sure to return Extend
2383    // so that the parameter gets the right LLVM IR attributes.
2384    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2385      // Treat an enum type as its underlying type.
2386      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2387        Ty = EnumTy->getDecl()->getIntegerType();
2388
2389      if (Ty->isIntegralOrEnumerationType() &&
2390          Ty->isPromotableIntegerType())
2391        return ABIArgInfo::getExtend();
2392    }
2393
2394    break;
2395
2396    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2397    // available SSE register is used, the registers are taken in the
2398    // order from %xmm0 to %xmm7.
2399  case SSE: {
2400    llvm::Type *IRType = CGT.ConvertType(Ty);
2401    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2402    ++neededSSE;
2403    break;
2404  }
2405  }
2406
2407  llvm::Type *HighPart = nullptr;
2408  switch (Hi) {
2409    // Memory was handled previously, ComplexX87 and X87 should
2410    // never occur as hi classes, and X87Up must be preceded by X87,
2411    // which is passed in memory.
2412  case Memory:
2413  case X87:
2414  case ComplexX87:
2415    llvm_unreachable("Invalid classification for hi word.");
2416
2417  case NoClass: break;
2418
2419  case Integer:
2420    ++neededInt;
2421    // Pick an 8-byte type based on the preferred type.
2422    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2423
2424    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2425      return ABIArgInfo::getDirect(HighPart, 8);
2426    break;
2427
2428    // X87Up generally doesn't occur here (long double is passed in
2429    // memory), except in situations involving unions.
2430  case X87Up:
2431  case SSE:
2432    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2433
2434    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2435      return ABIArgInfo::getDirect(HighPart, 8);
2436
2437    ++neededSSE;
2438    break;
2439
2440    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2441    // eightbyte is passed in the upper half of the last used SSE
    // register.  This only happens when 128-bit or 256-bit vectors are passed.
2443  case SSEUp:
2444    assert(Lo == SSE && "Unexpected SSEUp classification");
2445    ResType = GetByteVectorType(Ty);
2446    break;
2447  }
2448
2449  // If a high part was specified, merge it together with the low part.  It is
2450  // known to pass in the high eightbyte of the result.  We do this by forming a
2451  // first class struct aggregate with the high and low part: {low, high}
2452  if (HighPart)
2453    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2454
2455  return ABIArgInfo::getDirect(ResType);
2456}
2457
2458void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2459
2460  if (!getCXXABI().classifyReturnType(FI))
2461    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2462
2463  // Keep track of the number of assigned registers.
2464  unsigned freeIntRegs = 6, freeSSERegs = 8;
2465
2466  // If the return value is indirect, then the hidden argument is consuming one
2467  // integer register.
2468  if (FI.getReturnInfo().isIndirect())
2469    --freeIntRegs;
2470
2471  bool isVariadic = FI.isVariadic();
2472  unsigned numRequiredArgs = 0;
2473  if (isVariadic)
2474    numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs();
2475
2476  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2477  // get assigned (in left-to-right order) for passing as follows...
2478  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2479       it != ie; ++it) {
2480    bool isNamedArg = true;
2481    if (isVariadic)
2482      isNamedArg = (it - FI.arg_begin()) <
2483                    static_cast<signed>(numRequiredArgs);
2484
2485    unsigned neededInt, neededSSE;
2486    it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2487                                    neededSSE, isNamedArg);
2488
2489    // AMD64-ABI 3.2.3p3: If there are no registers available for any
2490    // eightbyte of an argument, the whole argument is passed on the
2491    // stack. If registers have already been assigned for some
2492    // eightbytes of such an argument, the assignments get reverted.
2493    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
2494      freeIntRegs -= neededInt;
2495      freeSSERegs -= neededSSE;
2496    } else {
2497      it->info = getIndirectResult(it->type, freeIntRegs);
2498    }
2499  }
2500}
2501
2502static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
2503                                        QualType Ty,
2504                                        CodeGenFunction &CGF) {
2505  llvm::Value *overflow_arg_area_p =
2506    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2507  llvm::Value *overflow_arg_area =
2508    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2509
2510  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2511  // byte boundary if alignment needed by type exceeds 8 byte boundary.
2512  // It isn't stated explicitly in the standard, but in practice we use
2513  // alignment greater than 16 where necessary.
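  // For illustration: with a 16-byte-aligned type and overflow_arg_area at
  // 0x...38, the rounding below yields 0x...40 ((0x38 + 15) & -16).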
2514  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
2515  if (Align > 8) {
2516    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
2517    llvm::Value *Offset =
2518      llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
2519    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
2520    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
2521                                                    CGF.Int64Ty);
2522    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
2523    overflow_arg_area =
2524      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
2525                                 overflow_arg_area->getType(),
2526                                 "overflow_arg_area.align");
2527  }
2528
2529  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
2530  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2531  llvm::Value *Res =
2532    CGF.Builder.CreateBitCast(overflow_arg_area,
2533                              llvm::PointerType::getUnqual(LTy));
2534
2535  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2536  // l->overflow_arg_area + sizeof(type).
2537  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2538  // an 8 byte boundary.
2539
2540  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2541  llvm::Value *Offset =
2542      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
2543  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2544                                            "overflow_arg_area.next");
2545  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2546
2547  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2548  return Res;
2549}
2550
2551llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2552                                      CodeGenFunction &CGF) const {
2553  // Assume that va_list type is correct; should be pointer to LLVM type:
2554  // struct {
2555  //   i32 gp_offset;
2556  //   i32 fp_offset;
2557  //   i8* overflow_arg_area;
2558  //   i8* reg_save_area;
2559  // };
2560  unsigned neededInt, neededSSE;
2561
2562  Ty = CGF.getContext().getCanonicalType(Ty);
2563  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
2564                                       /*isNamedArg*/false);
2565
2566  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2567  // in the registers. If not go to step 7.
2568  if (!neededInt && !neededSSE)
2569    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2570
2571  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2572  // general purpose registers needed to pass type and num_fp to hold
2573  // the number of floating point registers needed.
2574
2575  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2576  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2577  // l->fp_offset > 304 - num_fp * 16 go to step 7.
2578  //
  // NOTE: 304 is a typo; there are only (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.
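  //
  // For illustration: an argument needing two GPRs (neededInt == 2) can only
  // be taken from the register save area while gp_offset <= 48 - 2 * 8 == 32;
  // otherwise it falls through to the overflow area.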
2581
2582  llvm::Value *InRegs = nullptr;
2583  llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
2584  llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
2585  if (neededInt) {
2586    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2587    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2588    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2589    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2590  }
2591
2592  if (neededSSE) {
2593    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2594    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2595    llvm::Value *FitsInFP =
2596      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2597    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2598    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2599  }
2600
2601  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2602  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2603  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2604  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2605
2606  // Emit code to load the value if it was passed in registers.
2607
2608  CGF.EmitBlock(InRegBlock);
2609
2610  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2611  // an offset of l->gp_offset and/or l->fp_offset. This may require
2612  // copying to a temporary location in case the parameter is passed
2613  // in different register classes or requires an alignment greater
2614  // than 8 for general purpose registers and 16 for XMM registers.
2615  //
2616  // FIXME: This really results in shameful code when we end up needing to
2617  // collect arguments from different places; often what should result in a
2618  // simple assembling of a structure from scattered addresses has many more
2619  // loads than necessary. Can we clean this up?
2620  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2621  llvm::Value *RegAddr =
2622    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2623                           "reg_save_area");
2624  if (neededInt && neededSSE) {
2625    // FIXME: Cleanup.
2626    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2627    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2628    llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2629    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2630    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2631    llvm::Type *TyLo = ST->getElementType(0);
2632    llvm::Type *TyHi = ST->getElementType(1);
2633    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2634           "Unexpected ABI info for mixed regs");
2635    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2636    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2637    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2638    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2639    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
2640    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
2641    llvm::Value *V =
2642      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2643    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2644    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2645    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2646
2647    RegAddr = CGF.Builder.CreateBitCast(Tmp,
2648                                        llvm::PointerType::getUnqual(LTy));
2649  } else if (neededInt) {
2650    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2651    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2652                                        llvm::PointerType::getUnqual(LTy));
2653
2654    // Copy to a temporary if necessary to ensure the appropriate alignment.
2655    std::pair<CharUnits, CharUnits> SizeAlign =
2656        CGF.getContext().getTypeInfoInChars(Ty);
2657    uint64_t TySize = SizeAlign.first.getQuantity();
2658    unsigned TyAlign = SizeAlign.second.getQuantity();
2659    if (TyAlign > 8) {
2660      llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2661      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
2662      RegAddr = Tmp;
2663    }
2664  } else if (neededSSE == 1) {
2665    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2666    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2667                                        llvm::PointerType::getUnqual(LTy));
2668  } else {
2669    assert(neededSSE == 2 && "Invalid number of needed registers!");
2670    // SSE registers are spaced 16 bytes apart in the register save
2671    // area, we need to collect the two eightbytes together.
2672    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2673    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2674    llvm::Type *DoubleTy = CGF.DoubleTy;
2675    llvm::Type *DblPtrTy =
2676      llvm::PointerType::getUnqual(DoubleTy);
2677    llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL);
2678    llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
2679    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2680    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2681                                                         DblPtrTy));
2682    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2683    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2684                                                         DblPtrTy));
2685    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2686    RegAddr = CGF.Builder.CreateBitCast(Tmp,
2687                                        llvm::PointerType::getUnqual(LTy));
2688  }
2689
2690  // AMD64-ABI 3.5.7p5: Step 5. Set:
2691  // l->gp_offset = l->gp_offset + num_gp * 8
2692  // l->fp_offset = l->fp_offset + num_fp * 16.
2693  if (neededInt) {
2694    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
2695    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2696                            gp_offset_p);
2697  }
2698  if (neededSSE) {
2699    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
2700    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2701                            fp_offset_p);
2702  }
2703  CGF.EmitBranch(ContBlock);
2704
2705  // Emit code to load the value if it was passed in memory.
2706
2707  CGF.EmitBlock(InMemBlock);
2708  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2709
2710  // Return the appropriate result.
2711
2712  CGF.EmitBlock(ContBlock);
2713  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
2714                                                 "vaarg.addr");
2715  ResAddr->addIncoming(RegAddr, InRegBlock);
2716  ResAddr->addIncoming(MemAddr, InMemBlock);
2717  return ResAddr;
2718}
2719
2720ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
2721
2722  if (Ty->isVoidType())
2723    return ABIArgInfo::getIgnore();
2724
2725  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2726    Ty = EnumTy->getDecl()->getIntegerType();
2727
2728  uint64_t Size = getContext().getTypeSize(Ty);
2729
2730  const RecordType *RT = Ty->getAs<RecordType>();
2731  if (RT) {
2732    if (!IsReturnType) {
2733      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
2734        return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2735    }
2736
2737    if (RT->getDecl()->hasFlexibleArrayMember())
2738      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2739
2740    // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2741    if (Size == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
2742      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2743                                                          Size));
2744  }
2745
2746  if (Ty->isMemberPointerType()) {
2747    // If the member pointer is represented by an LLVM int or ptr, pass it
2748    // directly.
2749    llvm::Type *LLTy = CGT.ConvertType(Ty);
2750    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
2751      return ABIArgInfo::getDirect();
2752  }
2753
2754  if (RT || Ty->isMemberPointerType()) {
2755    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2756    // not 1, 2, 4, or 8 bytes, must be passed by reference."
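    //
    // For illustration: a 3-byte or a 16-byte struct is passed by reference,
    // while a 4-byte struct falls through and is coerced to i32 below.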
2757    if (Size > 64 || !llvm::isPowerOf2_64(Size))
2758      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2759
2760    // Otherwise, coerce it to a small integer.
2761    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
2762  }
2763
2764  if (Ty->isPromotableIntegerType())
2765    return ABIArgInfo::getExtend();
2766
2767  return ABIArgInfo::getDirect();
2768}
2769
2770void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2771  if (!getCXXABI().classifyReturnType(FI))
2772    FI.getReturnInfo() = classify(FI.getReturnType(), true);
2773
2774  for (auto &I : FI.arguments())
2775    I.info = classify(I.type, false);
2776}
2777
2778llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2779                                      CodeGenFunction &CGF) const {
2780  llvm::Type *BPP = CGF.Int8PtrPtrTy;
2781
2782  CGBuilderTy &Builder = CGF.Builder;
2783  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2784                                                       "ap");
2785  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2786  llvm::Type *PTy =
2787    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2788  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2789
2790  uint64_t Offset =
2791    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2792  llvm::Value *NextAddr =
2793    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2794                      "ap.next");
2795  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2796
2797  return AddrTyped;
2798}
2799
2800namespace {
2801
2802class NaClX86_64ABIInfo : public ABIInfo {
2803 public:
2804  NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2805      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
2806  void computeInfo(CGFunctionInfo &FI) const override;
2807  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2808                         CodeGenFunction &CGF) const override;
2809 private:
2810  PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
2811  X86_64ABIInfo NInfo; // Used for everything else.
2812};
2813
2814class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo  {
2815 public:
2816  NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2817      : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2818};
2819
2820}
2821
2822void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2823  if (FI.getASTCallingConvention() == CC_PnaclCall)
2824    PInfo.computeInfo(FI);
2825  else
2826    NInfo.computeInfo(FI);
2827}
2828
2829llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2830                                          CodeGenFunction &CGF) const {
2831  // Always use the native convention; calling pnacl-style varargs functions
2832  // is unsupported.
2833  return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2834}
2835
2836
2837// PowerPC-32
2838
2839namespace {
2840class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2841public:
2842  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2843
2844  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
2845    // This is recovered from gcc output.
2846    return 1; // r1 is the dedicated stack pointer
2847  }
2848
2849  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2850                               llvm::Value *Address) const override;
2851};
2852
2853}
2854
2855bool
2856PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2857                                                llvm::Value *Address) const {
2858  // This is calculated from the LLVM and GCC tables and verified
2859  // against gcc output.  AFAIK all ABIs use the same encoding.
2860
2861  CodeGen::CGBuilderTy &Builder = CGF.Builder;
2862
2863  llvm::IntegerType *i8 = CGF.Int8Ty;
2864  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2865  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2866  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2867
2868  // 0-31: r0-31, the 4-byte general-purpose registers
2869  AssignToArrayRange(Builder, Address, Four8, 0, 31);
2870
2871  // 32-63: fp0-31, the 8-byte floating-point registers
2872  AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2873
2874  // 64-76 are various 4-byte special-purpose registers:
2875  // 64: mq
2876  // 65: lr
2877  // 66: ctr
2878  // 67: ap
2879  // 68-75 cr0-7
2880  // 76: xer
2881  AssignToArrayRange(Builder, Address, Four8, 64, 76);
2882
2883  // 77-108: v0-31, the 16-byte vector registers
2884  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2885
2886  // 109: vrsave
2887  // 110: vscr
2888  // 111: spe_acc
2889  // 112: spefscr
2890  // 113: sfp
2891  AssignToArrayRange(Builder, Address, Four8, 109, 113);
2892
2893  return false;
2894}
2895
2896// PowerPC-64
2897
2898namespace {
2899/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
2900class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
2901
2902public:
2903  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
2904
2905  bool isPromotableTypeForABI(QualType Ty) const;
2906  bool isAlignedParamType(QualType Ty) const;
2907
2908  ABIArgInfo classifyReturnType(QualType RetTy) const;
2909  ABIArgInfo classifyArgumentType(QualType Ty) const;
2910
2911  // TODO: We can add more logic to computeInfo to improve performance.
2912  // Example: For aggregate arguments that fit in a register, we could
2913  // use getDirectInReg (as is done below for structs containing a single
2914  // floating-point value) to avoid pushing them to memory on function
2915  // entry.  This would require changing the logic in PPCISelLowering
2916  // when lowering the parameters in the caller and args in the callee.
2917  void computeInfo(CGFunctionInfo &FI) const override {
2918    if (!getCXXABI().classifyReturnType(FI))
2919      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2920    for (auto &I : FI.arguments()) {
2921      // We rely on the default argument classification for the most part.
2922      // One exception:  An aggregate containing a single floating-point
2923      // or vector item must be passed in a register if one is available.
2924      const Type *T = isSingleElementStruct(I.type, getContext());
2925      if (T) {
2926        const BuiltinType *BT = T->getAs<BuiltinType>();
2927        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
2928            (BT && BT->isFloatingPoint())) {
2929          QualType QT(T, 0);
2930          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
2931          continue;
2932        }
2933      }
2934      I.info = classifyArgumentType(I.type);
2935    }
2936  }
2937
2938  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2939                         CodeGenFunction &CGF) const override;
2940};
2941
2942class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
2943public:
2944  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
2945    : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
2946
2947  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
2948    // This is recovered from gcc output.
2949    return 1; // r1 is the dedicated stack pointer
2950  }
2951
2952  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2953                               llvm::Value *Address) const override;
2954};
2955
2956class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2957public:
2958  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2959
2960  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
2961    // This is recovered from gcc output.
2962    return 1; // r1 is the dedicated stack pointer
2963  }
2964
2965  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2966                               llvm::Value *Address) const override;
2967};
2968
2969}
2970
2971// Return true if the ABI requires Ty to be passed sign- or zero-
2972// extended to 64 bits.
2973bool
2974PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
2975  // Treat an enum type as its underlying type.
2976  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2977    Ty = EnumTy->getDecl()->getIntegerType();
2978
2979  // Promotable integer types are required to be promoted by the ABI.
2980  if (Ty->isPromotableIntegerType())
2981    return true;
2982
2983  // In addition to the usual promotable integer types, we also need to
2984  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
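  // For example, a plain 'int' or 'unsigned int' argument occupies a full
  // doubleword and must arrive sign- or zero-extended, whereas 'long' and
  // pointer arguments are already 64 bits wide and need no extension.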
2985  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2986    switch (BT->getKind()) {
2987    case BuiltinType::Int:
2988    case BuiltinType::UInt:
2989      return true;
2990    default:
2991      break;
2992    }
2993
2994  return false;
2995}
2996
2997/// isAlignedParamType - Determine whether a type requires 16-byte
2998/// alignment in the parameter area.
2999bool
3000PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
3001  // Complex types are passed just like their elements.
3002  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
3003    Ty = CTy->getElementType();
3004
3005  // Only vector types of size 16 bytes need alignment (larger types are
3006  // passed via reference, smaller types are not aligned).
3007  if (Ty->isVectorType())
3008    return getContext().getTypeSize(Ty) == 128;
3009
3010  // For single-element float/vector structs, we consider the whole type
3011  // to have the same alignment requirements as its single element.
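  // For example, "struct { vector int v; }" shares the 16-byte alignment of
  // its Altivec member, whereas "struct { double d; }" only has the 8-byte
  // alignment of a double and gets no special treatment here.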
3012  const Type *AlignAsType = nullptr;
3013  const Type *EltType = isSingleElementStruct(Ty, getContext());
3014  if (EltType) {
3015    const BuiltinType *BT = EltType->getAs<BuiltinType>();
3016    if ((EltType->isVectorType() &&
3017         getContext().getTypeSize(EltType) == 128) ||
3018        (BT && BT->isFloatingPoint()))
3019      AlignAsType = EltType;
3020  }
3021
3022  // For these special-case aggregates, only vector base types need alignment.
3023  if (AlignAsType)
3024    return AlignAsType->isVectorType();
3025
3026  // Otherwise, we only need alignment for any aggregate type that
3027  // has an alignment requirement of >= 16 bytes.
3028  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128)
3029    return true;
3030
3031  return false;
3032}
3033
3034ABIArgInfo
3035PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
3036  if (Ty->isAnyComplexType())
3037    return ABIArgInfo::getDirect();
3038
3039  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
3040  // or via reference (larger than 16 bytes).
3041  if (Ty->isVectorType()) {
3042    uint64_t Size = getContext().getTypeSize(Ty);
3043    if (Size > 128)
3044      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3045    else if (Size < 128) {
3046      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
3047      return ABIArgInfo::getDirect(CoerceTy);
3048    }
3049  }
3050
3051  if (isAggregateTypeForABI(Ty)) {
3052    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
3053      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3054
3055    uint64_t ABIAlign = isAlignedParamType(Ty) ? 16 : 8;
3056    uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
3057    return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
3058                                   /*Realign=*/TyAlign > ABIAlign);
3059  }
3060
3061  return (isPromotableTypeForABI(Ty) ?
3062          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3063}
3064
3065ABIArgInfo
3066PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
3067  if (RetTy->isVoidType())
3068    return ABIArgInfo::getIgnore();
3069
3070  if (RetTy->isAnyComplexType())
3071    return ABIArgInfo::getDirect();
3072
3073  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
3074  // or via reference (larger than 16 bytes).
3075  if (RetTy->isVectorType()) {
3076    uint64_t Size = getContext().getTypeSize(RetTy);
3077    if (Size > 128)
3078      return ABIArgInfo::getIndirect(0);
3079    else if (Size < 128) {
3080      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
3081      return ABIArgInfo::getDirect(CoerceTy);
3082    }
3083  }
3084
3085  if (isAggregateTypeForABI(RetTy))
3086    return ABIArgInfo::getIndirect(0);
3087
3088  return (isPromotableTypeForABI(RetTy) ?
3089          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3090}
3091
3092// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
3093llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
3094                                           QualType Ty,
3095                                           CodeGenFunction &CGF) const {
3096  llvm::Type *BP = CGF.Int8PtrTy;
3097  llvm::Type *BPP = CGF.Int8PtrPtrTy;
3098
3099  CGBuilderTy &Builder = CGF.Builder;
3100  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3101  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3102
3103  // Handle types that require 16-byte alignment in the parameter save area.
3104  if (isAlignedParamType(Ty)) {
3105    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3106    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15));
3107    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16));
3108    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
3109  }
3110
3111  // Update the va_list pointer.  The pointer should be bumped by the
3112  // size of the object.  We can trust getTypeSize() except for a complex
3113  // type whose base type is smaller than a doubleword.  For these, the
3114  // size of the object is 16 bytes; see below for further explanation.
3115  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
3116  QualType BaseTy;
3117  unsigned CplxBaseSize = 0;
3118
3119  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3120    BaseTy = CTy->getElementType();
3121    CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
3122    if (CplxBaseSize < 8)
3123      SizeInBytes = 16;
3124  }
3125
3126  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
3127  llvm::Value *NextAddr =
3128    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
3129                      "ap.next");
3130  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3131
3132  // If we have a complex type and the base type is smaller than 8 bytes,
3133  // the ABI calls for the real and imaginary parts to be right-adjusted
3134  // in separate doublewords.  However, Clang expects us to produce a
3135  // pointer to a structure with the two parts packed tightly.  So generate
3136  // loads of the real and imaginary parts relative to the va_list pointer,
3137  // and store them to a temporary structure.
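  // For a big-endian 'float _Complex', for instance, CplxBaseSize is 4, so the
  // real part is loaded from ap+4 and the imaginary part from ap+12 before
  // being packed into an adjacent temporary.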
3138  if (CplxBaseSize && CplxBaseSize < 8) {
3139    llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3140    llvm::Value *ImagAddr = RealAddr;
3141    if (CGF.CGM.getDataLayout().isBigEndian()) {
3142      RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
3143      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
3144    } else {
3145      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
3146    }
3147    llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
3148    RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
3149    ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
3150    llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
3151    llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
3152    llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
3153                                            "vacplx");
3154    llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
3155    llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
3156    Builder.CreateStore(Real, RealPtr, false);
3157    Builder.CreateStore(Imag, ImagPtr, false);
3158    return Ptr;
3159  }
3160
3161  // If the argument is smaller than 8 bytes, it is right-adjusted in
3162  // its doubleword slot.  Adjust the pointer to pick it up from the
3163  // correct offset.
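  // E.g. a 2-byte 'short' lives in the last two bytes of its big-endian slot,
  // so the pointer is advanced by 8 - 2 = 6 bytes before the final bitcast.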
3164  if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
3165    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3166    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
3167    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
3168  }
3169
3170  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3171  return Builder.CreateBitCast(Addr, PTy);
3172}
3173
3174static bool
3175PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3176                              llvm::Value *Address) {
3177  // This is calculated from the LLVM and GCC tables and verified
3178  // against gcc output.  AFAIK all ABIs use the same encoding.
3179
3180  CodeGen::CGBuilderTy &Builder = CGF.Builder;
3181
3182  llvm::IntegerType *i8 = CGF.Int8Ty;
3183  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3184  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3185  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3186
3187  // 0-31: r0-31, the 8-byte general-purpose registers
3188  AssignToArrayRange(Builder, Address, Eight8, 0, 31);
3189
3190  // 32-63: fp0-31, the 8-byte floating-point registers
3191  AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3192
3193  // 64-76 are various 4-byte special-purpose registers:
3194  // 64: mq
3195  // 65: lr
3196  // 66: ctr
3197  // 67: ap
3198  // 68-75 cr0-7
3199  // 76: xer
3200  AssignToArrayRange(Builder, Address, Four8, 64, 76);
3201
3202  // 77-108: v0-31, the 16-byte vector registers
3203  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3204
3205  // 109: vrsave
3206  // 110: vscr
3207  // 111: spe_acc
3208  // 112: spefscr
3209  // 113: sfp
3210  AssignToArrayRange(Builder, Address, Four8, 109, 113);
3211
3212  return false;
3213}
3214
3215bool
3216PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3217  CodeGen::CodeGenFunction &CGF,
3218  llvm::Value *Address) const {
3219
3220  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3221}
3222
3223bool
3224PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3225                                                llvm::Value *Address) const {
3226
3227  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3228}
3229
3230//===----------------------------------------------------------------------===//
3231// AArch64 ABI Implementation
3232//===----------------------------------------------------------------------===//
3233
3234namespace {
3235
3236class AArch64ABIInfo : public ABIInfo {
3237public:
3238  enum ABIKind {
3239    AAPCS = 0,
3240    DarwinPCS
3241  };
3242
3243private:
3244  ABIKind Kind;
3245
3246public:
3247  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
3248
3249private:
3250  ABIKind getABIKind() const { return Kind; }
3251  bool isDarwinPCS() const { return Kind == DarwinPCS; }
3252
3253  ABIArgInfo classifyReturnType(QualType RetTy) const;
3254  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &AllocatedVFP,
3255                                  bool &IsHA, unsigned &AllocatedGPR,
3256                                  bool &IsSmallAggr, bool IsNamedArg) const;
3257  bool isIllegalVectorType(QualType Ty) const;
3258
3259  void computeInfo(CGFunctionInfo &FI) const override {
3260    // To correctly handle Homogeneous Aggregates, we need to keep track of the
3261    // number of SIMD and Floating-point registers allocated so far.
3262    // If the argument is an HFA or an HVA and there are sufficient unallocated
3263    // SIMD and Floating-point registers, then the argument is allocated to SIMD
3264    // and Floating-point Registers (with one register per member of the HFA or
3265    // HVA). Otherwise, the NSRN is set to 8.
3266    unsigned AllocatedVFP = 0;
3267
3268    // To correctly handle small aggregates, we need to keep track of the number
3269    // of GPRs allocated so far. If the small aggregate can't all fit into
3270    // registers, it is passed on the stack. We don't allow the aggregate to be
3271    // partially in registers.
3272    unsigned AllocatedGPR = 0;
3273
3274    // Find the number of named arguments. Variadic arguments get special
3275    // treatment with the Darwin ABI.
3276    unsigned NumRequiredArgs = (FI.isVariadic() ?
3277                                FI.getRequiredArgs().getNumRequiredArgs() :
3278                                FI.arg_size());
3279
3280    if (!getCXXABI().classifyReturnType(FI))
3281      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3282    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3283         it != ie; ++it) {
3284      unsigned PreAllocation = AllocatedVFP, PreGPR = AllocatedGPR;
3285      bool IsHA = false, IsSmallAggr = false;
3286      const unsigned NumVFPs = 8;
3287      const unsigned NumGPRs = 8;
3288      bool IsNamedArg = ((it - FI.arg_begin()) <
3289                         static_cast<signed>(NumRequiredArgs));
3290      it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
3291                                      AllocatedGPR, IsSmallAggr, IsNamedArg);
3292
3293      // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
3294      // as sequences of floats since they'll get "holes" inserted as
3295      // padding by the back end.
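      // E.g. an HFA of three floats that ends up on the stack is coerced to
      // [2 x double] (two 64-bit slots) so its 12 bytes stay contiguous.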
3296      if (IsHA && AllocatedVFP > NumVFPs && !isDarwinPCS() &&
3297          getContext().getTypeAlign(it->type) < 64) {
3298        uint32_t NumStackSlots = getContext().getTypeSize(it->type);
3299        NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
3300
3301        llvm::Type *CoerceTy = llvm::ArrayType::get(
3302            llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
3303        it->info = ABIArgInfo::getDirect(CoerceTy);
3304      }
3305
3306      // If we do not have enough VFP registers for the HA, any VFP registers
3307      // that are unallocated are marked as unavailable. To achieve this, we add
3308      // padding of (NumVFPs - PreAllocation) floats.
3309      if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
3310        llvm::Type *PaddingTy = llvm::ArrayType::get(
3311            llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
3312        it->info.setPaddingType(PaddingTy);
3313      }
3314
3315      // If we do not have enough GPRs for the small aggregate, any GPR regs
3316      // that are unallocated are marked as unavailable.
3317      if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
3318        llvm::Type *PaddingTy = llvm::ArrayType::get(
3319            llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreGPR);
3320        it->info =
3321            ABIArgInfo::getDirect(it->info.getCoerceToType(), 0, PaddingTy);
3322      }
3323    }
3324  }
3325
3326  llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3327                               CodeGenFunction &CGF) const;
3328
3329  llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3330                              CodeGenFunction &CGF) const;
3331
3332  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3333                         CodeGenFunction &CGF) const override {
3334    return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
3335                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
3336  }
3337};
3338
3339class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
3340public:
3341  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
3342      : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
3343
3344  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
3345    return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
3346  }
3347
3348  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { return 31; }
3349
3350  bool doesReturnSlotInterfereWithArgs() const override { return false; }
3351};
3352}
3353
3354static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
3355                                   ASTContext &Context,
3356                                   uint64_t *HAMembers = nullptr);
3357
3358ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty,
3359                                                unsigned &AllocatedVFP,
3360                                                bool &IsHA,
3361                                                unsigned &AllocatedGPR,
3362                                                bool &IsSmallAggr,
3363                                                bool IsNamedArg) const {
3364  // Handle illegal vector types here.
3365  if (isIllegalVectorType(Ty)) {
3366    uint64_t Size = getContext().getTypeSize(Ty);
3367    // Android promotes <2 x i8> to i16, not i32
3368    if (Size <= 16) {
3369      llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
3370      AllocatedGPR++;
3371      return ABIArgInfo::getDirect(ResType);
3372    }
3373    if (Size == 32) {
3374      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
3375      AllocatedGPR++;
3376      return ABIArgInfo::getDirect(ResType);
3377    }
3378    if (Size == 64) {
3379      llvm::Type *ResType =
3380          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
3381      AllocatedVFP++;
3382      return ABIArgInfo::getDirect(ResType);
3383    }
3384    if (Size == 128) {
3385      llvm::Type *ResType =
3386          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
3387      AllocatedVFP++;
3388      return ABIArgInfo::getDirect(ResType);
3389    }
3390    AllocatedGPR++;
3391    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3392  }
3393  if (Ty->isVectorType())
3394    // Size of a legal vector should be either 64 or 128.
3395    AllocatedVFP++;
3396  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3397    if (BT->getKind() == BuiltinType::Half ||
3398        BT->getKind() == BuiltinType::Float ||
3399        BT->getKind() == BuiltinType::Double ||
3400        BT->getKind() == BuiltinType::LongDouble)
3401      AllocatedVFP++;
3402  }
3403
3404  if (!isAggregateTypeForABI(Ty)) {
3405    // Treat an enum type as its underlying type.
3406    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3407      Ty = EnumTy->getDecl()->getIntegerType();
3408
3409    if (!Ty->isFloatingType() && !Ty->isVectorType()) {
3410      unsigned Alignment = getContext().getTypeAlign(Ty);
3411      if (!isDarwinPCS() && Alignment > 64)
3412        AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);
3413
3414      int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
3415      AllocatedGPR += RegsNeeded;
3416    }
3417    return (Ty->isPromotableIntegerType() && isDarwinPCS()
3418                ? ABIArgInfo::getExtend()
3419                : ABIArgInfo::getDirect());
3420  }
3421
3422  // Structures with either a non-trivial destructor or a non-trivial
3423  // copy constructor are always indirect.
3424  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
3425    AllocatedGPR++;
3426    return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
3427                                          CGCXXABI::RAA_DirectInMemory);
3428  }
3429
3430  // Empty records are always ignored on Darwin, but actually passed in C++ mode
3431  // elsewhere for GNU compatibility.
3432  if (isEmptyRecord(getContext(), Ty, true)) {
3433    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
3434      return ABIArgInfo::getIgnore();
3435
3436    ++AllocatedGPR;
3437    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3438  }
3439
3440  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
3441  const Type *Base = nullptr;
3442  uint64_t Members = 0;
3443  if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
3444    IsHA = true;
3445    if (!IsNamedArg && isDarwinPCS()) {
3446      // With the Darwin ABI, variadic arguments are always passed on the stack
3447      // and should not be expanded. Treat variadic HFAs as arrays of doubles.
3448      uint64_t Size = getContext().getTypeSize(Ty);
3449      llvm::Type *BaseTy = llvm::Type::getDoubleTy(getVMContext());
3450      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
3451    }
3452    AllocatedVFP += Members;
3453    return ABIArgInfo::getExpand();
3454  }
3455
3456  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
3457  uint64_t Size = getContext().getTypeSize(Ty);
3458  if (Size <= 128) {
3459    unsigned Alignment = getContext().getTypeAlign(Ty);
3460    if (!isDarwinPCS() && Alignment > 64)
3461      AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);
3462
3463    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
3464    AllocatedGPR += Size / 64;
3465    IsSmallAggr = true;
3466    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
3467    // For aggregates with 16-byte alignment, we use i128.
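    // E.g. "struct { void *p; long x; }" (16 bytes, 8-byte aligned) becomes
    // [2 x i64], whereas an aggregate carrying an __int128 member is 16-byte
    // aligned and is coerced to a single i128 below.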
3468    if (Alignment < 128 && Size == 128) {
3469      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
3470      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
3471    }
3472    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
3473  }
3474
3475  AllocatedGPR++;
3476  return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3477}
3478
3479ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
3480  if (RetTy->isVoidType())
3481    return ABIArgInfo::getIgnore();
3482
3483  // Large vector types should be returned via memory.
3484  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
3485    return ABIArgInfo::getIndirect(0);
3486
3487  if (!isAggregateTypeForABI(RetTy)) {
3488    // Treat an enum type as its underlying type.
3489    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3490      RetTy = EnumTy->getDecl()->getIntegerType();
3491
3492    return (RetTy->isPromotableIntegerType() && isDarwinPCS()
3493                ? ABIArgInfo::getExtend()
3494                : ABIArgInfo::getDirect());
3495  }
3496
3497  if (isEmptyRecord(getContext(), RetTy, true))
3498    return ABIArgInfo::getIgnore();
3499
3500  const Type *Base = nullptr;
3501  if (isHomogeneousAggregate(RetTy, Base, getContext()))
3502    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
3503    return ABIArgInfo::getDirect();
3504
3505  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
3506  uint64_t Size = getContext().getTypeSize(RetTy);
3507  if (Size <= 128) {
3508    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
3509    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
3510  }
3511
3512  return ABIArgInfo::getIndirect(0);
3513}
3514
3515/// isIllegalVectorType - check whether the vector type is legal for AArch64.
3516bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
3517  if (const VectorType *VT = Ty->getAs<VectorType>()) {
3518    // Check whether VT is legal.
3519    unsigned NumElements = VT->getNumElements();
3520    uint64_t Size = getContext().getTypeSize(VT);
3521    // NumElements should be a power of 2 between 1 and 16.
3522    if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
3523      return true;
3524    return Size != 64 && (Size != 128 || NumElements == 1);
3525  }
3526  return false;
3527}
3528
3529static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
3530                                     int AllocatedGPR, int AllocatedVFP,
3531                                     bool IsIndirect, CodeGenFunction &CGF) {
3532  // The AArch64 va_list type and handling is specified in the Procedure Call
3533  // Standard, section B.4:
3534  //
3535  // struct {
3536  //   void *__stack;
3537  //   void *__gr_top;
3538  //   void *__vr_top;
3539  //   int __gr_offs;
3540  //   int __vr_offs;
3541  // };
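  //
  // __gr_offs and __vr_offs hold negative byte offsets from __gr_top/__vr_top
  // while saved register slots remain (the register copy of an argument lives
  // at reg_top + reg_offs), and become non-negative once those registers are
  // exhausted; the reg_offs >= 0 check below uses this to fall back to the
  // __stack pointer.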
3542
3543  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
3544  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3545  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
3546  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3547  auto &Ctx = CGF.getContext();
3548
3549  llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
3550  int reg_top_index;
3551  int RegSize;
3552  if (AllocatedGPR) {
3553    assert(!AllocatedVFP && "Arguments never split between int & VFP regs");
3554    // 3 is the field number of __gr_offs
3555    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
3556    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
3557    reg_top_index = 1; // field number for __gr_top
3558    RegSize = 8 * AllocatedGPR;
3559  } else {
3560    assert(!AllocatedGPR && "Argument must go in VFP or int regs");
3561    // 4 is the field number of __vr_offs.
3562    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
3563    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
3564    reg_top_index = 2; // field number for __vr_top
3565    RegSize = 16 * AllocatedVFP;
3566  }
3567
3568  //=======================================
3569  // Find out where argument was passed
3570  //=======================================
3571
3572  // If reg_offs >= 0 we're already using the stack for this type of
3573  // argument. We don't want to keep updating reg_offs (in case it overflows,
3574  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
3575  // whatever they get).
3576  llvm::Value *UsingStack = nullptr;
3577  UsingStack = CGF.Builder.CreateICmpSGE(
3578      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
3579
3580  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
3581
3582  // Otherwise, at least some kind of argument could go in these registers, the
3583  // question is whether this particular type is too big.
3584  CGF.EmitBlock(MaybeRegBlock);
3585
3586  // Integer arguments may need to be aligned to an even register (for example a
3587  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
3588  // align __gr_offs to calculate the potential address.
3589  if (AllocatedGPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
3590    int Align = Ctx.getTypeAlign(Ty) / 8;
3591
3592    reg_offs = CGF.Builder.CreateAdd(
3593        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
3594        "align_regoffs");
3595    reg_offs = CGF.Builder.CreateAnd(
3596        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
3597        "aligned_regoffs");
3598  }
3599
3600  // Update the gr_offs/vr_offs pointer for the next call to va_arg on this va_list.
3601  llvm::Value *NewOffset = nullptr;
3602  NewOffset = CGF.Builder.CreateAdd(
3603      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
3604  CGF.Builder.CreateStore(NewOffset, reg_offs_p);
3605
3606  // Now we're in a position to decide whether this argument really was in
3607  // registers or not.
3608  llvm::Value *InRegs = nullptr;
3609  InRegs = CGF.Builder.CreateICmpSLE(
3610      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
3611
3612  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
3613
3614  //=======================================
3615  // Argument was in registers
3616  //=======================================
3617
3618  // Now we emit the code for if the argument was originally passed in
3619  // registers. First start the appropriate block:
3620  CGF.EmitBlock(InRegBlock);
3621
3622  llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
3623  reg_top_p =
3624      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
3625  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
3626  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
3627  llvm::Value *RegAddr = nullptr;
3628  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
3629
3630  if (IsIndirect) {
3631    // If it's been passed indirectly (actually a struct), whatever we find from
3632    // stored registers or on the stack will actually be a struct **.
3633    MemTy = llvm::PointerType::getUnqual(MemTy);
3634  }
3635
3636  const Type *Base = nullptr;
3637  uint64_t NumMembers;
3638  bool IsHFA = isHomogeneousAggregate(Ty, Base, Ctx, &NumMembers);
3639  if (IsHFA && NumMembers > 1) {
3640    // Homogeneous aggregates passed in registers will have their elements split
3641    // and stored 16 bytes apart regardless of size (they're notionally in qN,
3642    // qN+1, ...). We reload and store into a temporary local variable
3643    // contiguously.
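    // For example, an HFA of three floats has its elements saved 16 bytes
    // apart (one per q register); the loop below gathers them into a packed
    // [3 x float] temporary and returns a pointer to that.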
3644    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
3645    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
3646    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
3647    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
3648    int Offset = 0;
3649
3650    if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
3651      Offset = 16 - Ctx.getTypeSize(Base) / 8;
3652    for (unsigned i = 0; i < NumMembers; ++i) {
3653      llvm::Value *BaseOffset =
3654          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
3655      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
3656      LoadAddr = CGF.Builder.CreateBitCast(
3657          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
3658      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
3659
3660      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
3661      CGF.Builder.CreateStore(Elem, StoreAddr);
3662    }
3663
3664    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
3665  } else {
3666    // Otherwise the object is contiguous in memory
3667    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
3668    if (CGF.CGM.getDataLayout().isBigEndian() &&
3669        (IsHFA || !isAggregateTypeForABI(Ty)) &&
3670        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
3671      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
3672      BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
3673
3674      BaseAddr = CGF.Builder.CreateAdd(
3675          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
3676
3677      BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
3678    }
3679
3680    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
3681  }
3682
3683  CGF.EmitBranch(ContBlock);
3684
3685  //=======================================
3686  // Argument was on the stack
3687  //=======================================
3688  CGF.EmitBlock(OnStackBlock);
3689
3690  llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
3691  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
3692  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
3693
3694  // Again, stack arguments may need realignment. In this case both integer and
3695  // floating-point ones might be affected.
3696  if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
3697    int Align = Ctx.getTypeAlign(Ty) / 8;
3698
3699    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
3700
3701    OnStackAddr = CGF.Builder.CreateAdd(
3702        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
3703        "align_stack");
3704    OnStackAddr = CGF.Builder.CreateAnd(
3705        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
3706        "align_stack");
3707
3708    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
3709  }
3710
3711  uint64_t StackSize;
3712  if (IsIndirect)
3713    StackSize = 8;
3714  else
3715    StackSize = Ctx.getTypeSize(Ty) / 8;
3716
3717  // All stack slots are 8 bytes
3718  StackSize = llvm::RoundUpToAlignment(StackSize, 8);
3719
3720  llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
3721  llvm::Value *NewStack =
3722      CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
3723
3724  // Write the new value of __stack for the next call to va_arg
3725  CGF.Builder.CreateStore(NewStack, stack_p);
3726
3727  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
3728      Ctx.getTypeSize(Ty) < 64) {
3729    int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
3730    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
3731
3732    OnStackAddr = CGF.Builder.CreateAdd(
3733        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
3734
3735    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
3736  }
3737
3738  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
3739
3740  CGF.EmitBranch(ContBlock);
3741
3742  //=======================================
3743  // Tidy up
3744  //=======================================
3745  CGF.EmitBlock(ContBlock);
3746
3747  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
3748  ResAddr->addIncoming(RegAddr, InRegBlock);
3749  ResAddr->addIncoming(OnStackAddr, OnStackBlock);
3750
3751  if (IsIndirect)
3752    return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
3753
3754  return ResAddr;
3755}
3756
3757llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3758                                          CodeGenFunction &CGF) const {
3759
3760  unsigned AllocatedGPR = 0, AllocatedVFP = 0;
3761  bool IsHA = false, IsSmallAggr = false;
3762  ABIArgInfo AI = classifyArgumentType(Ty, AllocatedVFP, IsHA, AllocatedGPR,
3763                                       IsSmallAggr, false /*IsNamedArg*/);
3764
3765  return EmitAArch64VAArg(VAListAddr, Ty, AllocatedGPR, AllocatedVFP,
3766                          AI.isIndirect(), CGF);
3767}
3768
3769llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3770                                           CodeGenFunction &CGF) const {
3771  // We do not support va_arg for aggregates or illegal vector types.
3772  // Lower VAArg here for these cases and use the LLVM va_arg instruction for
3773  // other cases.
3774  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
3775    return nullptr;
3776
3777  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
3778  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
3779
3780  const Type *Base = nullptr;
3781  bool isHA = isHomogeneousAggregate(Ty, Base, getContext());
3782
3783  bool isIndirect = false;
3784  // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
3785  // be passed indirectly.
3786  if (Size > 16 && !isHA) {
3787    isIndirect = true;
3788    Size = 8;
3789    Align = 8;
3790  }
3791
3792  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
3793  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
3794
3795  CGBuilderTy &Builder = CGF.Builder;
3796  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3797  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3798
3799  if (isEmptyRecord(getContext(), Ty, true)) {
3800    // These are ignored for parameter passing purposes.
3801    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3802    return Builder.CreateBitCast(Addr, PTy);
3803  }
3804
3805  const uint64_t MinABIAlign = 8;
3806  if (Align > MinABIAlign) {
3807    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
3808    Addr = Builder.CreateGEP(Addr, Offset);
3809    llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3810    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
3811    llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
3812    Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
3813  }
3814
3815  uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
3816  llvm::Value *NextAddr = Builder.CreateGEP(
3817      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
3818  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3819
3820  if (isIndirect)
3821    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
3822  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3823  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3824
3825  return AddrTyped;
3826}
3827
3828//===----------------------------------------------------------------------===//
3829// ARM ABI Implementation
3830//===----------------------------------------------------------------------===//
3831
3832namespace {
3833
3834class ARMABIInfo : public ABIInfo {
3835public:
3836  enum ABIKind {
3837    APCS = 0,
3838    AAPCS = 1,
3839    AAPCS_VFP
3840  };
3841
3842private:
3843  ABIKind Kind;
3844  mutable int VFPRegs[16];
3845  const unsigned NumVFPs;
3846  const unsigned NumGPRs;
3847  mutable unsigned AllocatedGPRs;
3848  mutable unsigned AllocatedVFPs;
3849
3850public:
3851  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
3852    NumVFPs(16), NumGPRs(4) {
3853    setRuntimeCC();
3854    resetAllocatedRegs();
3855  }
3856
3857  bool isEABI() const {
3858    switch (getTarget().getTriple().getEnvironment()) {
3859    case llvm::Triple::Android:
3860    case llvm::Triple::EABI:
3861    case llvm::Triple::EABIHF:
3862    case llvm::Triple::GNUEABI:
3863    case llvm::Triple::GNUEABIHF:
3864      return true;
3865    default:
3866      return false;
3867    }
3868  }
3869
3870  bool isEABIHF() const {
3871    switch (getTarget().getTriple().getEnvironment()) {
3872    case llvm::Triple::EABIHF:
3873    case llvm::Triple::GNUEABIHF:
3874      return true;
3875    default:
3876      return false;
3877    }
3878  }
3879
3880  ABIKind getABIKind() const { return Kind; }
3881
3882private:
3883  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
3884  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
3885                                  bool &IsCPRC) const;
3886  bool isIllegalVectorType(QualType Ty) const;
3887
3888  void computeInfo(CGFunctionInfo &FI) const override;
3889
3890  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3891                         CodeGenFunction &CGF) const override;
3892
3893  llvm::CallingConv::ID getLLVMDefaultCC() const;
3894  llvm::CallingConv::ID getABIDefaultCC() const;
3895  void setRuntimeCC();
3896
3897  void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
3898  void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
3899  void resetAllocatedRegs(void) const;
3900};
3901
3902class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
3903public:
3904  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
3905    :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
3906
3907  const ARMABIInfo &getABIInfo() const {
3908    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
3909  }
3910
3911  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3912    return 13;
3913  }
3914
3915  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
3916    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
3917  }
3918
3919  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3920                               llvm::Value *Address) const override {
3921    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3922
3923    // 0-15 are the 16 integer registers.
3924    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
3925    return false;
3926  }
3927
3928  unsigned getSizeOfUnwindException() const override {
3929    if (getABIInfo().isEABI()) return 88;
3930    return TargetCodeGenInfo::getSizeOfUnwindException();
3931  }
3932
3933  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3934                           CodeGen::CodeGenModule &CGM) const override {
3935    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3936    if (!FD)
3937      return;
3938
3939    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
3940    if (!Attr)
3941      return;
3942
3943    const char *Kind;
3944    switch (Attr->getInterrupt()) {
3945    case ARMInterruptAttr::Generic: Kind = ""; break;
3946    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
3947    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
3948    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
3949    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
3950    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
3951    }
3952
3953    llvm::Function *Fn = cast<llvm::Function>(GV);
3954
3955    Fn->addFnAttr("interrupt", Kind);
3956
3957    if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
3958      return;
3959
3960    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
3961    // however this is not necessarily true on taking any interrupt. Instruct
3962    // the backend to perform a realignment as part of the function prologue.
3963    llvm::AttrBuilder B;
3964    B.addStackAlignmentAttr(8);
3965    Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
3966                      llvm::AttributeSet::get(CGM.getLLVMContext(),
3967                                              llvm::AttributeSet::FunctionIndex,
3968                                              B));
3969  }
3970
3971};
3972
3973}
3974
3975void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
3976  // To correctly handle Homogeneous Aggregates, we need to keep track of the
3977  // VFP registers allocated so far.
3978  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
3979  // VFP registers of the appropriate type unallocated then the argument is
3980  // allocated to the lowest-numbered sequence of such registers.
3981  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
3982  // unallocated are marked as unavailable.
3983  resetAllocatedRegs();
3984
3985  if (getCXXABI().classifyReturnType(FI)) {
3986    if (FI.getReturnInfo().isIndirect())
3987      markAllocatedGPRs(1, 1);
3988  } else {
3989    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
3990  }
3991  for (auto &I : FI.arguments()) {
3992    unsigned PreAllocationVFPs = AllocatedVFPs;
3993    unsigned PreAllocationGPRs = AllocatedGPRs;
3994    bool IsCPRC = false;
3995    // 6.1.2.3 There is one VFP co-processor register class using registers
3996    // s0-s15 (d0-d7) for passing arguments.
3997    I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC);
3998
3999    // If we have allocated some arguments onto the stack (due to running
4000    // out of VFP registers), we cannot split an argument between GPRs and
4001    // the stack. If this situation occurs, we add padding to prevent the
4002    // GPRs from being used. In this situation, the current argument could
4003    // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
4004    // unusable anyway.
4005    const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs;
4006    if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs && StackUsed) {
4007      llvm::Type *PaddingTy = llvm::ArrayType::get(
4008          llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
4009      if (I.info.canHaveCoerceToType()) {
4010        I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */, 0 /* offset */,
4011                                       PaddingTy);
4012      } else {
4013        I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
4014                                       PaddingTy);
4015      }
4016    }
4017  }
4018
4019  // Always honor user-specified calling convention.
4020  if (FI.getCallingConvention() != llvm::CallingConv::C)
4021    return;
4022
4023  llvm::CallingConv::ID cc = getRuntimeCC();
4024  if (cc != llvm::CallingConv::C)
4025    FI.setEffectiveCallingConvention(cc);
4026}
4027
4028/// Return the default calling convention that LLVM will use.
4029llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
4030  // The default calling convention that LLVM will infer.
4031  if (isEABIHF())
4032    return llvm::CallingConv::ARM_AAPCS_VFP;
4033  else if (isEABI())
4034    return llvm::CallingConv::ARM_AAPCS;
4035  else
4036    return llvm::CallingConv::ARM_APCS;
4037}
4038
4039/// Return the calling convention that our ABI would like us to use
4040/// as the C calling convention.
4041llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
4042  switch (getABIKind()) {
4043  case APCS: return llvm::CallingConv::ARM_APCS;
4044  case AAPCS: return llvm::CallingConv::ARM_AAPCS;
4045  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
4046  }
4047  llvm_unreachable("bad ABI kind");
4048}
4049
4050void ARMABIInfo::setRuntimeCC() {
4051  assert(getRuntimeCC() == llvm::CallingConv::C);
4052
4053  // Don't muddy up the IR with a ton of explicit annotations if
4054  // they'd just match what LLVM will infer from the triple.
4055  llvm::CallingConv::ID abiCC = getABIDefaultCC();
4056  if (abiCC != getLLVMDefaultCC())
4057    RuntimeCC = abiCC;
4058}
4059
4060/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
4061/// aggregate.  If HAMembers is non-null, the number of base elements
4062/// contained in the type is returned through it; this is used for the
4063/// recursive calls that check aggregate component types.
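/// For example, "struct { float x, y, z; }" is a homogeneous aggregate with
/// Base = float and three members, as is "struct { double d[2]; }" with two,
/// whereas "struct { float f; double d; }" is not, because its members do not
/// share a single base type.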
4064static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
4065                                   ASTContext &Context, uint64_t *HAMembers) {
4066  uint64_t Members = 0;
4067  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
4068    if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
4069      return false;
4070    Members *= AT->getSize().getZExtValue();
4071  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4072    const RecordDecl *RD = RT->getDecl();
4073    if (RD->hasFlexibleArrayMember())
4074      return false;
4075
4076    Members = 0;
4077    for (const auto *FD : RD->fields()) {
4078      uint64_t FldMembers;
4079      if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
4080        return false;
4081
4082      Members = (RD->isUnion() ?
4083                 std::max(Members, FldMembers) : Members + FldMembers);
4084    }
4085  } else {
4086    Members = 1;
4087    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4088      Members = 2;
4089      Ty = CT->getElementType();
4090    }
4091
4092    // Homogeneous aggregates for AAPCS-VFP must have base types of float,
4093    // double, or 64-bit or 128-bit vectors.
4094    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4095      if (BT->getKind() != BuiltinType::Float &&
4096          BT->getKind() != BuiltinType::Double &&
4097          BT->getKind() != BuiltinType::LongDouble)
4098        return false;
4099    } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4100      unsigned VecSize = Context.getTypeSize(VT);
4101      if (VecSize != 64 && VecSize != 128)
4102        return false;
4103    } else {
4104      return false;
4105    }
4106
4107    // The base type must be the same for all members.  Vector types of the
4108    // same total size are treated as being equivalent here.
4109    const Type *TyPtr = Ty.getTypePtr();
4110    if (!Base)
4111      Base = TyPtr;
4112
4113    if (Base != TyPtr) {
4114      // Homogeneous aggregates are defined as containing members with the
4115      // same machine type. There are two cases in which two members have
4116      // different TypePtrs but the same machine type:
4117
4118      // 1) Vectors of the same length, regardless of the type and number
4119      //    of their members.
4120      const bool SameLengthVectors = Base->isVectorType() && TyPtr->isVectorType()
4121        && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
4122
4123      // 2) In the 32-bit AAPCS, `double' and `long double' have the same
4124      //    machine type. This is not the case for the 64-bit AAPCS.
4125      const bool SameSizeDoubles =
4126           (   (   Base->isSpecificBuiltinType(BuiltinType::Double)
4127                && TyPtr->isSpecificBuiltinType(BuiltinType::LongDouble))
4128            || (   Base->isSpecificBuiltinType(BuiltinType::LongDouble)
4129                && TyPtr->isSpecificBuiltinType(BuiltinType::Double)))
4130        && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
4131
4132      if (!SameLengthVectors && !SameSizeDoubles)
4133        return false;
4134    }
4135  }
4136
4137  // Homogeneous Aggregates can have at most 4 members of the base type.
4138  if (HAMembers)
4139    *HAMembers = Members;
4140
4141  return (Members > 0 && Members <= 4);
4142}
4143
4144/// markAllocatedVFPs - update VFPRegs according to the alignment and
4145/// number of VFP registers (unit is S register) requested.
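/// For example, a double requests Alignment = 2 and NumRequired = 2 and so is
/// placed in the first free even-numbered S-register pair, while a float asks
/// for a single S register with no alignment constraint.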
4146void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
4147                                   unsigned NumRequired) const {
4148  // Early Exit.
4149  if (AllocatedVFPs >= 16) {
4150    // We use AllocatedVFPs > 16 to signal that some CPRCs were allocated on
4151    // the stack.
4152    AllocatedVFPs = 17;
4153    return;
4154  }
4155  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
4156  // VFP registers of the appropriate type unallocated then the argument is
4157  // allocated to the lowest-numbered sequence of such registers.
4158  for (unsigned I = 0; I < 16; I += Alignment) {
4159    bool FoundSlot = true;
4160    for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4161      if (J >= 16 || VFPRegs[J]) {
4162         FoundSlot = false;
4163         break;
4164      }
4165    if (FoundSlot) {
4166      for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4167        VFPRegs[J] = 1;
4168      AllocatedVFPs += NumRequired;
4169      return;
4170    }
4171  }
4172  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
4173  // unallocated are marked as unavailable.
4174  for (unsigned I = 0; I < 16; I++)
4175    VFPRegs[I] = 1;
4176  AllocatedVFPs = 17; // We do not have enough VFP registers.
4177}
4178
4179/// Update AllocatedGPRs to record the number of general purpose registers
4180/// which have been allocated. It is valid for AllocatedGPRs to go above 4;
4181/// this represents arguments being stored on the stack.
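/// Alignment is expressed in 4-byte register units: an 8-byte-aligned argument
/// (Alignment == 2) that would otherwise start in an odd-numbered register
/// first skips one register so that it occupies an even/odd pair such as
/// r0/r1 or r2/r3.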
4182void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
4183                                   unsigned NumRequired) const {
4184  assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes");
4185
4186  if (Alignment == 2 && AllocatedGPRs & 0x1)
4187    AllocatedGPRs += 1;
4188
4189  AllocatedGPRs += NumRequired;
4190}
4191
4192void ARMABIInfo::resetAllocatedRegs(void) const {
4193  AllocatedGPRs = 0;
4194  AllocatedVFPs = 0;
4195  for (unsigned i = 0; i < NumVFPs; ++i)
4196    VFPRegs[i] = 0;
4197}
4198
4199ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
4200                                            bool &IsCPRC) const {
4201  // We update number of allocated VFPs according to
4202  // 6.1.2.1 The following argument types are VFP CPRCs:
4203  //   A single-precision floating-point type (including promoted
4204  //   half-precision types); A double-precision floating-point type;
4205  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
4206  //   with a Base Type of a single- or double-precision floating-point type,
4207  //   64-bit containerized vectors or 128-bit containerized vectors with one
4208  //   to four Elements.
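  // For example, a float argument occupies a single S register, while a
  // homogeneous aggregate of four doubles occupies d0-d3 (eight S registers).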
4209
4210  // Handle illegal vector types here.
4211  if (isIllegalVectorType(Ty)) {
4212    uint64_t Size = getContext().getTypeSize(Ty);
4213    if (Size <= 32) {
4214      llvm::Type *ResType =
4215          llvm::Type::getInt32Ty(getVMContext());
4216      markAllocatedGPRs(1, 1);
4217      return ABIArgInfo::getDirect(ResType);
4218    }
4219    if (Size == 64) {
4220      llvm::Type *ResType = llvm::VectorType::get(
4221          llvm::Type::getInt32Ty(getVMContext()), 2);
4222      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
4223        markAllocatedGPRs(2, 2);
4224      } else {
4225        markAllocatedVFPs(2, 2);
4226        IsCPRC = true;
4227      }
4228      return ABIArgInfo::getDirect(ResType);
4229    }
4230    if (Size == 128) {
4231      llvm::Type *ResType = llvm::VectorType::get(
4232          llvm::Type::getInt32Ty(getVMContext()), 4);
4233      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
4234        markAllocatedGPRs(2, 4);
4235      } else {
4236        markAllocatedVFPs(4, 4);
4237        IsCPRC = true;
4238      }
4239      return ABIArgInfo::getDirect(ResType);
4240    }
4241    markAllocatedGPRs(1, 1);
4242    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
4243  }
4244  // Update VFPRegs for legal vector types.
4245  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
4246    if (const VectorType *VT = Ty->getAs<VectorType>()) {
4247      uint64_t Size = getContext().getTypeSize(VT);
4248      // The size of a legal vector is a power of 2 and at least 64 bits.
4249      markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
4250      IsCPRC = true;
4251    }
4252  }
4253  // Update VFPRegs for floating point types.
4254  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
4255    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4256      if (BT->getKind() == BuiltinType::Half ||
4257          BT->getKind() == BuiltinType::Float) {
4258        markAllocatedVFPs(1, 1);
4259        IsCPRC = true;
4260      }
4261      if (BT->getKind() == BuiltinType::Double ||
4262          BT->getKind() == BuiltinType::LongDouble) {
4263        markAllocatedVFPs(2, 2);
4264        IsCPRC = true;
4265      }
4266    }
4267  }
4268
4269  if (!isAggregateTypeForABI(Ty)) {
4270    // Treat an enum type as its underlying type.
4271    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
4272      Ty = EnumTy->getDecl()->getIntegerType();
4273    }
4274
4275    unsigned Size = getContext().getTypeSize(Ty);
4276    if (!IsCPRC)
4277      markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
4278    return (Ty->isPromotableIntegerType() ?
4279            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4280  }
4281
4282  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4283    markAllocatedGPRs(1, 1);
4284    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
4285  }
4286
4287  // Ignore empty records.
4288  if (isEmptyRecord(getContext(), Ty, true))
4289    return ABIArgInfo::getIgnore();
4290
4291  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
4292    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
4293    // into VFP registers.
4294    const Type *Base = nullptr;
4295    uint64_t Members = 0;
4296    if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
4297      assert(Base && "Base class should be set for homogeneous aggregate");
4298      // Base can be a floating-point type or a vector type.
4299      if (Base->isVectorType()) {
4300        // ElementSize is in number of floats.
4301        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
4302        markAllocatedVFPs(ElementSize,
4303                          Members * ElementSize);
4304      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
4305        markAllocatedVFPs(1, Members);
4306      else {
4307        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
4308               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
4309        markAllocatedVFPs(2, Members * 2);
4310      }
4311      IsCPRC = true;
4312      return ABIArgInfo::getDirect();
4313    }
4314  }
4315
4316  // Support byval for ARM.
4317  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
4318  // most 8-byte. We realign the indirect argument if type alignment is bigger
4319  // than ABI alignment.
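  // For example, under AAPCS a 128-byte struct declared with
  // __attribute__((aligned(16))) has a type alignment of 16, above the 8-byte
  // ABI cap, so it is passed byval with realignment.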
4320  uint64_t ABIAlign = 4;
4321  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
4322  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
4323      getABIKind() == ARMABIInfo::AAPCS)
4324    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
4325  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
4326    // Update Allocated GPRs. Since this is only used when the size of the
4327    // argument is greater than 64 bytes, this will always use up any available
4328    // registers (of which there are 4). We also don't care about getting the
4329    // alignment right, because general-purpose registers cannot be back-filled.
4330    markAllocatedGPRs(1, 4);
4331    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
4332           /*Realign=*/TyAlign > ABIAlign);
4333  }
4334
4335  // Otherwise, pass by coercing to a structure of the appropriate size.
4336  llvm::Type* ElemTy;
4337  unsigned SizeRegs;
4338  // FIXME: Try to match the types of the arguments more accurately where
4339  // we can.
4340  if (getContext().getTypeAlign(Ty) <= 32) {
4341    ElemTy = llvm::Type::getInt32Ty(getVMContext());
4342    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
4343    markAllocatedGPRs(1, SizeRegs);
4344  } else {
4345    ElemTy = llvm::Type::getInt64Ty(getVMContext());
4346    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
4347    markAllocatedGPRs(2, SizeRegs * 2);
4348  }
4349
4350  llvm::Type *STy =
4351    llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
4352  return ABIArgInfo::getDirect(STy);
4353}
4354
4355static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
4356                              llvm::LLVMContext &VMContext) {
4357  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
4358  // is called integer-like if its size is less than or equal to one word, and
4359  // the offset of each of its addressable sub-fields is zero.
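  // For example, struct { char c; } and union { int i; short s; } are
  // integer-like, but struct { short a, b; } is not, because b is at a
  // non-zero offset.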
4360
4361  uint64_t Size = Context.getTypeSize(Ty);
4362
4363  // Check that the type fits in a word.
4364  if (Size > 32)
4365    return false;
4366
4367  // FIXME: Handle vector types!
4368  if (Ty->isVectorType())
4369    return false;
4370
4371  // Float types are never treated as "integer like".
4372  if (Ty->isRealFloatingType())
4373    return false;
4374
4375  // If this is a builtin or pointer type then it is ok.
4376  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
4377    return true;
4378
4379  // Small complex integer types are "integer like".
4380  if (const ComplexType *CT = Ty->getAs<ComplexType>())
4381    return isIntegerLikeType(CT->getElementType(), Context, VMContext);
4382
4383  // Single element and zero sized arrays should be allowed, by the definition
4384  // above, but they are not.
4385
4386  // Otherwise, it must be a record type.
4387  const RecordType *RT = Ty->getAs<RecordType>();
4388  if (!RT) return false;
4389
4390  // Ignore records with flexible arrays.
4391  const RecordDecl *RD = RT->getDecl();
4392  if (RD->hasFlexibleArrayMember())
4393    return false;
4394
4395  // Check that all sub-fields are at offset 0, and are themselves "integer
4396  // like".
4397  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
4398
4399  bool HadField = false;
4400  unsigned idx = 0;
4401  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
4402       i != e; ++i, ++idx) {
4403    const FieldDecl *FD = *i;
4404
4405    // Bit-fields are not addressable; we only need to verify they are "integer
4406    // like". We still have to disallow a subsequent non-bitfield, for example:
4407    //   struct { int : 0; int x; }
4408    // is non-integer-like according to gcc.
4409    if (FD->isBitField()) {
4410      if (!RD->isUnion())
4411        HadField = true;
4412
4413      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
4414        return false;
4415
4416      continue;
4417    }
4418
4419    // Check if this field is at offset 0.
4420    if (Layout.getFieldOffset(idx) != 0)
4421      return false;
4422
4423    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
4424      return false;
4425
4426    // Only allow at most one field in a structure. This doesn't match the
4427    // wording above, but follows gcc in situations with a field following an
4428    // empty structure.
4429    if (!RD->isUnion()) {
4430      if (HadField)
4431        return false;
4432
4433      HadField = true;
4434    }
4435  }
4436
4437  return true;
4438}
4439
4440ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
4441                                          bool isVariadic) const {
4442  if (RetTy->isVoidType())
4443    return ABIArgInfo::getIgnore();
4444
4445  // Large vector types should be returned via memory.
4446  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
4447    markAllocatedGPRs(1, 1);
4448    return ABIArgInfo::getIndirect(0);
4449  }
4450
4451  if (!isAggregateTypeForABI(RetTy)) {
4452    // Treat an enum type as its underlying type.
4453    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4454      RetTy = EnumTy->getDecl()->getIntegerType();
4455
4456    return (RetTy->isPromotableIntegerType() ?
4457            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4458  }
4459
4460  // Are we following APCS?
4461  if (getABIKind() == APCS) {
4462    if (isEmptyRecord(getContext(), RetTy, false))
4463      return ABIArgInfo::getIgnore();
4464
4465    // Complex types are all returned as packed integers.
4466    //
4467    // FIXME: Consider using 2 x vector types if the back end handles them
4468    // correctly.
4469    if (RetTy->isAnyComplexType())
4470      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
4471                                              getContext().getTypeSize(RetTy)));
4472
4473    // Integer like structures are returned in r0.
4474    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
4475      // Return in the smallest viable integer type.
4476      uint64_t Size = getContext().getTypeSize(RetTy);
4477      if (Size <= 8)
4478        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4479      if (Size <= 16)
4480        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
4481      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4482    }
4483
4484    // Otherwise return in memory.
4485    markAllocatedGPRs(1, 1);
4486    return ABIArgInfo::getIndirect(0);
4487  }
4488
4489  // Otherwise this is an AAPCS variant.
4490
4491  if (isEmptyRecord(getContext(), RetTy, true))
4492    return ABIArgInfo::getIgnore();
4493
4494  // Check for homogeneous aggregates with AAPCS-VFP.
4495  if (getABIKind() == AAPCS_VFP && !isVariadic) {
4496    const Type *Base = nullptr;
4497    if (isHomogeneousAggregate(RetTy, Base, getContext())) {
4498      assert(Base && "Base class should be set for homogeneous aggregate");
4499      // Homogeneous Aggregates are returned directly.
4500      return ABIArgInfo::getDirect();
4501    }
4502  }
4503
4504  // Aggregates <= 4 bytes are returned in r0; other aggregates
4505  // are returned indirectly.
4506  uint64_t Size = getContext().getTypeSize(RetTy);
4507  if (Size <= 32) {
4508    if (getDataLayout().isBigEndian())
4509      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
4510      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4511
4512    // Return in the smallest viable integer type.
4513    if (Size <= 8)
4514      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4515    if (Size <= 16)
4516      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
4517    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4518  }
4519
4520  markAllocatedGPRs(1, 1);
4521  return ABIArgInfo::getIndirect(0);
4522}
4523
4524/// isIllegalVectorType - check whether Ty is an illegal vector type.
4525bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
4526  if (const VectorType *VT = Ty->getAs<VectorType>()) {
4527    // Check whether VT is legal.
4528    unsigned NumElements = VT->getNumElements();
4529    // NumElements should be a power of 2 or equal to 3.
4530    if (((NumElements & (NumElements - 1)) != 0) && NumElements != 3)
4531      return true;
4532  }
4533  return false;
4534}
4535
4536llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4537                                   CodeGenFunction &CGF) const {
4538  llvm::Type *BP = CGF.Int8PtrTy;
4539  llvm::Type *BPP = CGF.Int8PtrPtrTy;
4540
4541  CGBuilderTy &Builder = CGF.Builder;
4542  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
4543  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
4544
4545  if (isEmptyRecord(getContext(), Ty, true)) {
4546    // These are ignored for parameter passing purposes.
4547    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4548    return Builder.CreateBitCast(Addr, PTy);
4549  }
4550
4551  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
4552  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
4553  bool IsIndirect = false;
4554
4555  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
4556  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
4557  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
4558      getABIKind() == ARMABIInfo::AAPCS)
4559    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
4560  else
4561    TyAlign = 4;
4562  // Use indirect if size of the illegal vector is bigger than 32 bytes.
4563  if (isIllegalVectorType(Ty) && Size > 32) {
4564    IsIndirect = true;
4565    Size = 4;
4566    TyAlign = 4;
4567  }
4568
4569  // Handle address alignment for ABI alignment > 4 bytes.
4570  if (TyAlign > 4) {
4571    assert((TyAlign & (TyAlign - 1)) == 0 &&
4572           "Alignment is not power of 2!");
4573    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
4574    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
4575    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
4576    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
4577  }
4578
4579  uint64_t Offset =
4580    llvm::RoundUpToAlignment(Size, 4);
4581  llvm::Value *NextAddr =
4582    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
4583                      "ap.next");
4584  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
4585
4586  if (IsIndirect)
4587    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
4588  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
4589    // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
4590    // may not be correctly aligned for the vector type. We create an aligned
4591    // temporary space and copy the content over from ap.cur to the temporary
4592    // space. This is necessary if the natural alignment of the type is greater
4593    // than the ABI alignment.
4594    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
4595    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
4596    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
4597                                                    "var.align");
4598    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
4599    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
4600    Builder.CreateMemCpy(Dst, Src,
4601        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
4602        TyAlign, false);
4603    Addr = AlignedTemp; // The content is now in the aligned location.
4604  }
4605  llvm::Type *PTy =
4606    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4607  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
4608
4609  return AddrTyped;
4610}
4611
4612namespace {
4613
4614class NaClARMABIInfo : public ABIInfo {
4615 public:
4616  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
4617      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
4618  void computeInfo(CGFunctionInfo &FI) const override;
4619  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4620                         CodeGenFunction &CGF) const override;
4621 private:
4622  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
4623  ARMABIInfo NInfo; // Used for everything else.
4624};
4625
4626class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo  {
4627 public:
4628  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
4629      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
4630};
4631
4632}
4633
4634void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
4635  if (FI.getASTCallingConvention() == CC_PnaclCall)
4636    PInfo.computeInfo(FI);
4637  else
4638    static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
4639}
4640
4641llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4642                                       CodeGenFunction &CGF) const {
4643  // Always use the native convention; calling pnacl-style varargs functions
4644  // is unsupported.
4645  return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
4646}
4647
4648//===----------------------------------------------------------------------===//
4649// NVPTX ABI Implementation
4650//===----------------------------------------------------------------------===//
4651
4652namespace {
4653
4654class NVPTXABIInfo : public ABIInfo {
4655public:
4656  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4657
4658  ABIArgInfo classifyReturnType(QualType RetTy) const;
4659  ABIArgInfo classifyArgumentType(QualType Ty) const;
4660
4661  void computeInfo(CGFunctionInfo &FI) const override;
4662  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4663                         CodeGenFunction &CFG) const override;
4664};
4665
4666class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
4667public:
4668  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
4669    : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
4670
4671  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4672                           CodeGen::CodeGenModule &M) const override;
4673private:
4674  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
4675  // resulting MDNode to the nvvm.annotations MDNode.
4676  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
4677};
4678
4679ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
4680  if (RetTy->isVoidType())
4681    return ABIArgInfo::getIgnore();
4682
4683  // Note: this is different from the default ABI.
4684  if (!RetTy->isScalarType())
4685    return ABIArgInfo::getDirect();
4686
4687  // Treat an enum type as its underlying type.
4688  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4689    RetTy = EnumTy->getDecl()->getIntegerType();
4690
4691  return (RetTy->isPromotableIntegerType() ?
4692          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4693}
4694
4695ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
4696  // Treat an enum type as its underlying type.
4697  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4698    Ty = EnumTy->getDecl()->getIntegerType();
4699
4700  return (Ty->isPromotableIntegerType() ?
4701          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4702}
4703
4704void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
4705  if (!getCXXABI().classifyReturnType(FI))
4706    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4707  for (auto &I : FI.arguments())
4708    I.info = classifyArgumentType(I.type);
4709
4710  // Always honor user-specified calling convention.
4711  if (FI.getCallingConvention() != llvm::CallingConv::C)
4712    return;
4713
4714  FI.setEffectiveCallingConvention(getRuntimeCC());
4715}
4716
4717llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4718                                     CodeGenFunction &CFG) const {
4719  llvm_unreachable("NVPTX does not support varargs");
4720}
4721
4722void NVPTXTargetCodeGenInfo::
4723SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4724                    CodeGen::CodeGenModule &M) const {
4725  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4726  if (!FD) return;
4727
4728  llvm::Function *F = cast<llvm::Function>(GV);
4729
4730  // Perform special handling in OpenCL mode
4731  if (M.getLangOpts().OpenCL) {
4732    // Use OpenCL function attributes to check for kernel functions
4733    // By default, all functions are device functions
4734    if (FD->hasAttr<OpenCLKernelAttr>()) {
4735      // OpenCL __kernel functions get kernel metadata
4736      // Create !{<func-ref>, metadata !"kernel", i32 1} node
4737      addNVVMMetadata(F, "kernel", 1);
4738      // And kernel functions are not subject to inlining
4739      F->addFnAttr(llvm::Attribute::NoInline);
4740    }
4741  }
4742
4743  // Perform special handling in CUDA mode.
4744  if (M.getLangOpts().CUDA) {
4745    // CUDA __global__ functions get a kernel metadata entry.  Since
4746    // __global__ functions cannot be called from the device, we do not
4747    // need to set the noinline attribute.
4748    if (FD->hasAttr<CUDAGlobalAttr>()) {
4749      // Create !{<func-ref>, metadata !"kernel", i32 1} node
4750      addNVVMMetadata(F, "kernel", 1);
4751    }
4752    if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
4753      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
4754      addNVVMMetadata(F, "maxntidx",
4755                      FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
4756      // min blocks is a default argument for CUDALaunchBoundsAttr, so a zero
4757      // value from getMinBlocks means either that it was not specified in
4758      // __launch_bounds__ or that the user explicitly specified 0. In both
4759      // cases, we don't have to add a PTX directive.
4760      int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
4761      if (MinCTASM > 0) {
4762        // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
4763        addNVVMMetadata(F, "minctasm", MinCTASM);
4764      }
4765    }
4766  }
4767}
4768
4769void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
4770                                             int Operand) {
4771  llvm::Module *M = F->getParent();
4772  llvm::LLVMContext &Ctx = M->getContext();
4773
4774  // Get "nvvm.annotations" metadata node
4775  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
4776
4777  llvm::Value *MDVals[] = {
4778      F, llvm::MDString::get(Ctx, Name),
4779      llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand)};
4780  // Append metadata to nvvm.annotations
4781  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
4782}
4783}
4784
4785//===----------------------------------------------------------------------===//
4786// SystemZ ABI Implementation
4787//===----------------------------------------------------------------------===//
4788
4789namespace {
4790
4791class SystemZABIInfo : public ABIInfo {
4792public:
4793  SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4794
4795  bool isPromotableIntegerType(QualType Ty) const;
4796  bool isCompoundType(QualType Ty) const;
4797  bool isFPArgumentType(QualType Ty) const;
4798
4799  ABIArgInfo classifyReturnType(QualType RetTy) const;
4800  ABIArgInfo classifyArgumentType(QualType ArgTy) const;
4801
4802  void computeInfo(CGFunctionInfo &FI) const override {
4803    if (!getCXXABI().classifyReturnType(FI))
4804      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4805    for (auto &I : FI.arguments())
4806      I.info = classifyArgumentType(I.type);
4807  }
4808
4809  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4810                         CodeGenFunction &CGF) const override;
4811};
4812
4813class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
4814public:
4815  SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
4816    : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
4817};
4818
4819}
4820
4821bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
4822  // Treat an enum type as its underlying type.
4823  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4824    Ty = EnumTy->getDecl()->getIntegerType();
4825
4826  // Promotable integer types are required to be promoted by the ABI.
4827  if (Ty->isPromotableIntegerType())
4828    return true;
4829
4830  // 32-bit values must also be promoted.
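  // For example, a plain `int' argument or return value is extended to the
  // full 64-bit register width.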
4831  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4832    switch (BT->getKind()) {
4833    case BuiltinType::Int:
4834    case BuiltinType::UInt:
4835      return true;
4836    default:
4837      return false;
4838    }
4839  return false;
4840}
4841
4842bool SystemZABIInfo::isCompoundType(QualType Ty) const {
4843  return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
4844}
4845
4846bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
4847  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4848    switch (BT->getKind()) {
4849    case BuiltinType::Float:
4850    case BuiltinType::Double:
4851      return true;
4852    default:
4853      return false;
4854    }
4855
4856  if (const RecordType *RT = Ty->getAsStructureType()) {
4857    const RecordDecl *RD = RT->getDecl();
4858    bool Found = false;
4859
4860    // If this is a C++ record, check the bases first.
4861    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
4862      for (const auto &I : CXXRD->bases()) {
4863        QualType Base = I.getType();
4864
4865        // Empty bases don't affect things either way.
4866        if (isEmptyRecord(getContext(), Base, true))
4867          continue;
4868
4869        if (Found)
4870          return false;
4871        Found = isFPArgumentType(Base);
4872        if (!Found)
4873          return false;
4874      }
4875
4876    // Check the fields.
4877    for (const auto *FD : RD->fields()) {
4878      // Empty bitfields don't affect things either way.
4879      // Unlike isSingleElementStruct(), empty structure and array fields
4880      // do count.  So do anonymous bitfields that aren't zero-sized.
4881      if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4882        continue;
4883
4884      // Unlike isSingleElementStruct(), arrays do not count.
4885      // Nested isFPArgumentType structures still do though.
4886      if (Found)
4887        return false;
4888      Found = isFPArgumentType(FD->getType());
4889      if (!Found)
4890        return false;
4891    }
4892
4893    // Unlike isSingleElementStruct(), trailing padding is allowed.
4894    // An 8-byte aligned struct s { float f; } is passed as a double.
4895    return Found;
4896  }
4897
4898  return false;
4899}
4900
4901llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4902                                       CodeGenFunction &CGF) const {
4903  // Assume that va_list type is correct; should be pointer to LLVM type:
4904  // struct {
4905  //   i64 __gpr;
4906  //   i64 __fpr;
4907  //   i8 *__overflow_arg_area;
4908  //   i8 *__reg_save_area;
4909  // };
4910
4911  // Every argument occupies 8 bytes and is passed by preference in either
4912  // GPRs or FPRs.
4913  Ty = CGF.getContext().getCanonicalType(Ty);
4914  ABIArgInfo AI = classifyArgumentType(Ty);
4915  bool InFPRs = isFPArgumentType(Ty);
4916
4917  llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4918  bool IsIndirect = AI.isIndirect();
4919  unsigned UnpaddedBitSize;
4920  if (IsIndirect) {
4921    APTy = llvm::PointerType::getUnqual(APTy);
4922    UnpaddedBitSize = 64;
4923  } else
4924    UnpaddedBitSize = getContext().getTypeSize(Ty);
4925  unsigned PaddedBitSize = 64;
4926  assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
4927
4928  unsigned PaddedSize = PaddedBitSize / 8;
4929  unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
4930
4931  unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
4932  if (InFPRs) {
4933    MaxRegs = 4; // Maximum of 4 FPR arguments
4934    RegCountField = 1; // __fpr
4935    RegSaveIndex = 16; // save offset for f0
4936    RegPadding = 0; // floats are passed in the high bits of an FPR
4937  } else {
4938    MaxRegs = 5; // Maximum of 5 GPR arguments
4939    RegCountField = 0; // __gpr
4940    RegSaveIndex = 2; // save offset for r2
4941    RegPadding = Padding; // values are passed in the low bits of a GPR
4942  }
4943
4944  llvm::Value *RegCountPtr =
4945    CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
4946  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
4947  llvm::Type *IndexTy = RegCount->getType();
4948  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
4949  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
4950                                                 "fits_in_regs");
4951
4952  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4953  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4954  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4955  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4956
4957  // Emit code to load the value if it was passed in registers.
4958  CGF.EmitBlock(InRegBlock);
4959
4960  // Work out the address of an argument register.
4961  llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
4962  llvm::Value *ScaledRegCount =
4963    CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
4964  llvm::Value *RegBase =
4965    llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
4966  llvm::Value *RegOffset =
4967    CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
4968  llvm::Value *RegSaveAreaPtr =
4969    CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
4970  llvm::Value *RegSaveArea =
4971    CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
4972  llvm::Value *RawRegAddr =
4973    CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
4974  llvm::Value *RegAddr =
4975    CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
4976
4977  // Update the register count
4978  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
4979  llvm::Value *NewRegCount =
4980    CGF.Builder.CreateAdd(RegCount, One, "reg_count");
4981  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
4982  CGF.EmitBranch(ContBlock);
4983
4984  // Emit code to load the value if it was passed in memory.
4985  CGF.EmitBlock(InMemBlock);
4986
4987  // Work out the address of a stack argument.
4988  llvm::Value *OverflowArgAreaPtr =
4989    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
4990  llvm::Value *OverflowArgArea =
4991    CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
4992  llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
4993  llvm::Value *RawMemAddr =
4994    CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
4995  llvm::Value *MemAddr =
4996    CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
4997
4998  // Update the overflow_arg_area pointer.
4999  llvm::Value *NewOverflowArgArea =
5000    CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
5001  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5002  CGF.EmitBranch(ContBlock);
5003
5004  // Return the appropriate result.
5005  CGF.EmitBlock(ContBlock);
5006  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
5007  ResAddr->addIncoming(RegAddr, InRegBlock);
5008  ResAddr->addIncoming(MemAddr, InMemBlock);
5009
5010  if (IsIndirect)
5011    return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
5012
5013  return ResAddr;
5014}
5015
5016ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
5017  if (RetTy->isVoidType())
5018    return ABIArgInfo::getIgnore();
5019  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5020    return ABIArgInfo::getIndirect(0);
5021  return (isPromotableIntegerType(RetTy) ?
5022          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5023}
5024
5025ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
5026  // Handle the generic C++ ABI.
5027  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5028    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5029
5030  // Integers and enums are extended to full register width.
5031  if (isPromotableIntegerType(Ty))
5032    return ABIArgInfo::getExtend();
5033
5034  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
5035  uint64_t Size = getContext().getTypeSize(Ty);
5036  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
5037    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5038
5039  // Handle small structures.
5040  if (const RecordType *RT = Ty->getAs<RecordType>()) {
5041    // Structures with flexible arrays have variable length, so they really
5042    // fail the size test above.
5043    const RecordDecl *RD = RT->getDecl();
5044    if (RD->hasFlexibleArrayMember())
5045      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5046
5047    // The structure is passed as an unextended integer, a float, or a double.
5048    llvm::Type *PassTy;
5049    if (isFPArgumentType(Ty)) {
5050      assert(Size == 32 || Size == 64);
5051      if (Size == 32)
5052        PassTy = llvm::Type::getFloatTy(getVMContext());
5053      else
5054        PassTy = llvm::Type::getDoubleTy(getVMContext());
5055    } else
5056      PassTy = llvm::IntegerType::get(getVMContext(), Size);
5057    return ABIArgInfo::getDirect(PassTy);
5058  }
5059
5060  // Non-structure compounds are passed indirectly.
5061  if (isCompoundType(Ty))
5062    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5063
5064  return ABIArgInfo::getDirect(nullptr);
5065}
5066
5067//===----------------------------------------------------------------------===//
5068// MSP430 ABI Implementation
5069//===----------------------------------------------------------------------===//
5070
5071namespace {
5072
5073class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
5074public:
5075  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
5076    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
5077  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5078                           CodeGen::CodeGenModule &M) const override;
5079};
5080
5081}
5082
5083void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5084                                                  llvm::GlobalValue *GV,
5085                                             CodeGen::CodeGenModule &M) const {
5086  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
5087    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
5088      // Handle 'interrupt' attribute:
5089      llvm::Function *F = cast<llvm::Function>(GV);
5090
5091      // Step 1: Set ISR calling convention.
5092      F->setCallingConv(llvm::CallingConv::MSP430_INTR);
5093
5094      // Step 2: Add attributes goodness.
5095      F->addFnAttr(llvm::Attribute::NoInline);
5096
5097      // Step 3: Emit ISR vector alias.
5098      unsigned Num = attr->getNumber() / 2;
5099      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
5100                                "__isr_" + Twine(Num), F);
5101    }
5102  }
5103}
5104
5105//===----------------------------------------------------------------------===//
5106// MIPS ABI Implementation.  This works for both little-endian and
5107// big-endian variants.
5108//===----------------------------------------------------------------------===//
5109
5110namespace {
5111class MipsABIInfo : public ABIInfo {
5112  bool IsO32;
5113  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
5114  void CoerceToIntArgs(uint64_t TySize,
5115                       SmallVectorImpl<llvm::Type *> &ArgList) const;
5116  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
5117  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
5118  llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;
5119public:
5120  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
5121    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
5122    StackAlignInBytes(IsO32 ? 8 : 16) {}
5123
5124  ABIArgInfo classifyReturnType(QualType RetTy) const;
5125  ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
5126  void computeInfo(CGFunctionInfo &FI) const override;
5127  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5128                         CodeGenFunction &CGF) const override;
5129};
5130
5131class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
5132  unsigned SizeOfUnwindException;
5133public:
5134  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
5135    : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
5136      SizeOfUnwindException(IsO32 ? 24 : 32) {}
5137
5138  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
5139    return 29;
5140  }
5141
5142  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5143                           CodeGen::CodeGenModule &CGM) const override {
5144    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5145    if (!FD) return;
5146    llvm::Function *Fn = cast<llvm::Function>(GV);
5147    if (FD->hasAttr<Mips16Attr>()) {
5148      Fn->addFnAttr("mips16");
5149    }
5150    else if (FD->hasAttr<NoMips16Attr>()) {
5151      Fn->addFnAttr("nomips16");
5152    }
5153  }
5154
5155  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5156                               llvm::Value *Address) const override;
5157
5158  unsigned getSizeOfUnwindException() const override {
5159    return SizeOfUnwindException;
5160  }
5161};
5162}
5163
5164void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
5165                                  SmallVectorImpl<llvm::Type *> &ArgList) const {
5166  llvm::IntegerType *IntTy =
5167    llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
5168
5169  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
5170  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5171    ArgList.push_back(IntTy);
5172
5173  // If necessary, add one more integer type to ArgList.
5174  unsigned R = TySize % (MinABIStackAlignInBytes * 8);
5175
5176  if (R)
5177    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
5178}
5179
5180// In N32/64, an aligned double precision floating point field is passed in
5181// a register.
5182llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
5183  SmallVector<llvm::Type*, 8> ArgList, IntArgList;
5184
5185  if (IsO32) {
5186    CoerceToIntArgs(TySize, ArgList);
5187    return llvm::StructType::get(getVMContext(), ArgList);
5188  }
5189
5190  if (Ty->isComplexType())
5191    return CGT.ConvertType(Ty);
5192
5193  const RecordType *RT = Ty->getAs<RecordType>();
5194
5195  // Unions/vectors are passed in integer registers.
5196  if (!RT || !RT->isStructureOrClassType()) {
5197    CoerceToIntArgs(TySize, ArgList);
5198    return llvm::StructType::get(getVMContext(), ArgList);
5199  }
5200
5201  const RecordDecl *RD = RT->getDecl();
5202  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5203  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
5204
5205  uint64_t LastOffset = 0;
5206  unsigned idx = 0;
5207  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
5208
5209  // Iterate over fields in the struct/class and check if there are any aligned
5210  // double fields.
5211  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5212       i != e; ++i, ++idx) {
5213    const QualType Ty = i->getType();
5214    const BuiltinType *BT = Ty->getAs<BuiltinType>();
5215
5216    if (!BT || BT->getKind() != BuiltinType::Double)
5217      continue;
5218
5219    uint64_t Offset = Layout.getFieldOffset(idx);
5220    if (Offset % 64) // Ignore doubles that are not aligned.
5221      continue;
5222
5223    // Add ((Offset - LastOffset) / 64) args of type i64.
5224    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
5225      ArgList.push_back(I64);
5226
5227    // Add double type.
5228    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
5229    LastOffset = Offset + 64;
5230  }
5231
5232  CoerceToIntArgs(TySize - LastOffset, IntArgList);
5233  ArgList.append(IntArgList.begin(), IntArgList.end());
5234
5235  return llvm::StructType::get(getVMContext(), ArgList);
5236}
5237
5238llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
5239                                        uint64_t Offset) const {
5240  if (OrigOffset + MinABIStackAlignInBytes > Offset)
5241    return nullptr;
5242
5243  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
5244}
5245
5246ABIArgInfo
5247MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
5248  uint64_t OrigOffset = Offset;
5249  uint64_t TySize = getContext().getTypeSize(Ty);
5250  uint64_t Align = getContext().getTypeAlign(Ty) / 8;
5251
5252  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
5253                   (uint64_t)StackAlignInBytes);
5254  unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
5255  Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
5256
5257  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
5258    // Ignore empty aggregates.
5259    if (TySize == 0)
5260      return ABIArgInfo::getIgnore();
5261
5262    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5263      Offset = OrigOffset + MinABIStackAlignInBytes;
5264      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5265    }
5266
5267    // If we have reached here, aggregates are passed directly by coercing to
5268    // another structure type. Padding is inserted if the offset of the
5269    // aggregate is unaligned.
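    // For example, under N64 a 16-byte-aligned long double that would start at
    // offset 8 is placed at offset 16 instead, and an i64 padding type marks
    // the 8-byte hole.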
5270    return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
5271                                 getPaddingType(OrigOffset, CurrOffset));
5272  }
5273
5274  // Treat an enum type as its underlying type.
5275  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5276    Ty = EnumTy->getDecl()->getIntegerType();
5277
5278  if (Ty->isPromotableIntegerType())
5279    return ABIArgInfo::getExtend();
5280
5281  return ABIArgInfo::getDirect(
5282      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
5283}
5284
5285llvm::Type*
5286MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
5287  const RecordType *RT = RetTy->getAs<RecordType>();
5288  SmallVector<llvm::Type*, 8> RTList;
5289
5290  if (RT && RT->isStructureOrClassType()) {
5291    const RecordDecl *RD = RT->getDecl();
5292    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5293    unsigned FieldCnt = Layout.getFieldCount();
5294
5295    // N32/64 returns struct/classes in floating point registers if the
5296    // following conditions are met:
5297    // 1. The size of the struct/class is no larger than 128-bit.
5298    // 2. The struct/class has one or two fields all of which are floating
5299    //    point types.
5300    // 3. The offset of the first field is zero (this follows what gcc does).
5301    //
5302    // Any other composite results are returned in integer registers.
5303    //
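    // For example, struct { double d; float f; } is returned in $f0 and $f2,
    // while struct { double d; int i; } is returned in integer registers.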
5304    if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
5305      RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
5306      for (; b != e; ++b) {
5307        const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
5308
5309        if (!BT || !BT->isFloatingPoint())
5310          break;
5311
5312        RTList.push_back(CGT.ConvertType(b->getType()));
5313      }
5314
5315      if (b == e)
5316        return llvm::StructType::get(getVMContext(), RTList,
5317                                     RD->hasAttr<PackedAttr>());
5318
5319      RTList.clear();
5320    }
5321  }
5322
5323  CoerceToIntArgs(Size, RTList);
5324  return llvm::StructType::get(getVMContext(), RTList);
5325}
5326
5327ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
5328  uint64_t Size = getContext().getTypeSize(RetTy);
5329
5330  if (RetTy->isVoidType() || Size == 0)
5331    return ABIArgInfo::getIgnore();
5332
5333  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
5334    if (Size <= 128) {
5335      if (RetTy->isAnyComplexType())
5336        return ABIArgInfo::getDirect();
5337
5338      // O32 returns integer vectors in registers.
5339      if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
5340        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
5341
5342      if (!IsO32)
5343        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
5344    }
5345
5346    return ABIArgInfo::getIndirect(0);
5347  }
5348
5349  // Treat an enum type as its underlying type.
5350  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5351    RetTy = EnumTy->getDecl()->getIntegerType();
5352
5353  return (RetTy->isPromotableIntegerType() ?
5354          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5355}
5356
5357void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
5358  ABIArgInfo &RetInfo = FI.getReturnInfo();
5359  if (!getCXXABI().classifyReturnType(FI))
5360    RetInfo = classifyReturnType(FI.getReturnType());
5361
5362  // Check if a pointer to an aggregate is passed as a hidden argument.
5363  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
5364
5365  for (auto &I : FI.arguments())
5366    I.info = classifyArgumentType(I.type, Offset);
5367}
5368
5369llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5370                                    CodeGenFunction &CGF) const {
5371  llvm::Type *BP = CGF.Int8PtrTy;
5372  llvm::Type *BPP = CGF.Int8PtrPtrTy;
5373
5374  CGBuilderTy &Builder = CGF.Builder;
5375  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
5376  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5377  int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
5378  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5379  llvm::Value *AddrTyped;
5380  unsigned PtrWidth = getTarget().getPointerWidth(0);
5381  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
5382
5383  if (TypeAlign > MinABIStackAlignInBytes) {
5384    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
5385    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
5386    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
5387    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
5388    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
5389    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
5390  }
5391  else
5392    AddrTyped = Builder.CreateBitCast(Addr, PTy);
5393
5394  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
5395  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
5396  uint64_t Offset =
5397    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
5398  llvm::Value *NextAddr =
5399    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
5400                      "ap.next");
5401  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5402
5403  return AddrTyped;
5404}
5405
5406bool
5407MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5408                                               llvm::Value *Address) const {
5409  // This information comes from gcc's implementation, which seems to be
5410  // as canonical as it gets.
5411
5412  // Everything on MIPS is 4 bytes.  Double-precision FP registers
5413  // are aliased to pairs of single-precision FP registers.
5414  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5415
5416  // 0-31 are the general purpose registers, $0 - $31.
5417  // 32-63 are the floating-point registers, $f0 - $f31.
5418  // 64 and 65 are the multiply/divide registers, $hi and $lo.
5419  // 66 is the (notional, I think) register for signal-handler return.
5420  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5421
5422  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5423  // They are one bit wide and ignored here.
5424
5425  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5426  // (coprocessor 1 is the FP unit)
5427  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5428  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5429  // 176-181 are the DSP accumulator registers.
5430  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
5431  return false;
5432}
5433
5434//===----------------------------------------------------------------------===//
5435// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5436// Currently subclassed only to implement custom OpenCL C function attribute
5437// handling.
5438//===----------------------------------------------------------------------===//
5439
5440namespace {
5441
5442class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5443public:
5444  TCETargetCodeGenInfo(CodeGenTypes &CGT)
5445    : DefaultTargetCodeGenInfo(CGT) {}
5446
5447  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5448                           CodeGen::CodeGenModule &M) const override;
5449};
5450
5451void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5452                                               llvm::GlobalValue *GV,
5453                                               CodeGen::CodeGenModule &M) const {
5454  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5455  if (!FD) return;
5456
5457  llvm::Function *F = cast<llvm::Function>(GV);
5458
5459  if (M.getLangOpts().OpenCL) {
5460    if (FD->hasAttr<OpenCLKernelAttr>()) {
5461      // OpenCL C Kernel functions are not subject to inlining
5462      F->addFnAttr(llvm::Attribute::NoInline);
5463      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
5464      if (Attr) {
5465        // Convert the reqd_work_group_size() attributes to metadata.
5466        llvm::LLVMContext &Context = F->getContext();
5467        llvm::NamedMDNode *OpenCLMetadata =
5468            M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
5469
5470        SmallVector<llvm::Value*, 5> Operands;
5471        Operands.push_back(F);
5472
5473        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5474                             llvm::APInt(32, Attr->getXDim())));
5475        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5476                             llvm::APInt(32, Attr->getYDim())));
5477        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5478                             llvm::APInt(32, Attr->getZDim())));
5479
5480        // Add a boolean constant operand for "required" (true) or "hint" (false)
5481        // for implementing the work_group_size_hint attr later. Currently
5482        // always true as the hint is not yet implemented.
5483        Operands.push_back(llvm::ConstantInt::getTrue(Context));
5484        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
5485      }
5486    }
5487  }
5488}
5489
5490}
5491
5492//===----------------------------------------------------------------------===//
5493// Hexagon ABI Implementation
5494//===----------------------------------------------------------------------===//
5495
5496namespace {
5497
5498class HexagonABIInfo : public ABIInfo {
5499
5500
5501public:
5502  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5503
5504private:
5505
5506  ABIArgInfo classifyReturnType(QualType RetTy) const;
5507  ABIArgInfo classifyArgumentType(QualType RetTy) const;
5508
5509  void computeInfo(CGFunctionInfo &FI) const override;
5510
5511  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5512                         CodeGenFunction &CGF) const override;
5513};
5514
5515class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
5516public:
5517  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
5518    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
5519
5520  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5521    return 29;
5522  }
5523};
5524
5525}
5526
5527void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
5528  if (!getCXXABI().classifyReturnType(FI))
5529    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5530  for (auto &I : FI.arguments())
5531    I.info = classifyArgumentType(I.type);
5532}
5533
5534ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
5535  if (!isAggregateTypeForABI(Ty)) {
5536    // Treat an enum type as its underlying type.
5537    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5538      Ty = EnumTy->getDecl()->getIntegerType();
5539
5540    return (Ty->isPromotableIntegerType() ?
5541            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5542  }
5543
5544  // Ignore empty records.
5545  if (isEmptyRecord(getContext(), Ty, true))
5546    return ABIArgInfo::getIgnore();
5547
5548  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5549    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5550
5551  uint64_t Size = getContext().getTypeSize(Ty);
5552  if (Size > 64)
5553    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5554
5555  // Pass in the smallest viable integer type.
5556  if (Size > 32)
5557    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5558  if (Size > 16)
5559    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5560  if (Size > 8)
5561    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5562  return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5563}
5564
5565ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
5566  if (RetTy->isVoidType())
5567    return ABIArgInfo::getIgnore();
5568
5569  // Large vector types should be returned via memory.
5570  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
5571    return ABIArgInfo::getIndirect(0);
5572
5573  if (!isAggregateTypeForABI(RetTy)) {
5574    // Treat an enum type as its underlying type.
5575    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5576      RetTy = EnumTy->getDecl()->getIntegerType();
5577
5578    return (RetTy->isPromotableIntegerType() ?
5579            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5580  }
5581
5582  if (isEmptyRecord(getContext(), RetTy, true))
5583    return ABIArgInfo::getIgnore();
5584
5585  // Aggregates <= 8 bytes are returned in registers (r0, or the r1:r0 pair);
5586  // other aggregates are returned indirectly.
5587  uint64_t Size = getContext().getTypeSize(RetTy);
5588  if (Size <= 64) {
5589    // Return in the smallest viable integer type.
5590    if (Size <= 8)
5591      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5592    if (Size <= 16)
5593      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5594    if (Size <= 32)
5595      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5596    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5597  }
5598
5599  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5600}
5601
5602llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5603                                       CodeGenFunction &CGF) const {
5604  // FIXME: Need to handle alignment
5605  llvm::Type *BPP = CGF.Int8PtrPtrTy;
5606
5607  CGBuilderTy &Builder = CGF.Builder;
5608  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
5609                                                       "ap");
5610  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5611  llvm::Type *PTy =
5612    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5613  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
5614
5615  uint64_t Offset =
5616    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
5617  llvm::Value *NextAddr =
5618    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
5619                      "ap.next");
5620  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5621
5622  return AddrTyped;
5623}
5624
5625
5626//===----------------------------------------------------------------------===//
5627// SPARC v9 ABI Implementation.
5628// Based on the SPARC Compliance Definition version 2.4.1.
5629//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
5633//
5634// One case requires special care:
5635//
5636//   struct mixed {
5637//     int i;
5638//     float f;
5639//   };
5640//
5641// When a struct mixed is passed by value, it only occupies 8 bytes in the
5642// parameter array, but the int is passed in an integer register, and the float
5643// is passed in a floating point register. This is represented as two arguments
5644// with the LLVM IR inreg attribute:
5645//
//   declare void @f(i32 inreg %i, float inreg %f)
5647//
5648// The code generator will only allocate 4 bytes from the parameter array for
5649// the inreg arguments. All other arguments are allocated a multiple of 8
5650// bytes.
5651//
5652namespace {
5653class SparcV9ABIInfo : public ABIInfo {
5654public:
5655  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5656
5657private:
  ABIArgInfo classifyType(QualType Ty, unsigned SizeLimit) const;
5659  void computeInfo(CGFunctionInfo &FI) const override;
5660  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5661                         CodeGenFunction &CGF) const override;
5662
5663  // Coercion type builder for structs passed in registers. The coercion type
5664  // serves two purposes:
5665  //
5666  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
5667  //    in registers.
5668  // 2. Expose aligned floating point elements as first-level elements, so the
5669  //    code generator knows to pass them in floating point registers.
5670  //
5671  // We also compute the InReg flag which indicates that the struct contains
5672  // aligned 32-bit floats.
5673  //
5674  struct CoerceBuilder {
5675    llvm::LLVMContext &Context;
5676    const llvm::DataLayout &DL;
5677    SmallVector<llvm::Type*, 8> Elems;
5678    uint64_t Size;
5679    bool InReg;
5680
5681    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
5682      : Context(c), DL(dl), Size(0), InReg(false) {}
5683
5684    // Pad Elems with integers until Size is ToSize.
5685    void pad(uint64_t ToSize) {
5686      assert(ToSize >= Size && "Cannot remove elements");
5687      if (ToSize == Size)
5688        return;
5689
5690      // Finish the current 64-bit word.
5691      uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
5692      if (Aligned > Size && Aligned <= ToSize) {
5693        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
5694        Size = Aligned;
5695      }
5696
5697      // Add whole 64-bit words.
5698      while (Size + 64 <= ToSize) {
5699        Elems.push_back(llvm::Type::getInt64Ty(Context));
5700        Size += 64;
5701      }
5702
5703      // Final in-word padding.
5704      if (Size < ToSize) {
5705        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
5706        Size = ToSize;
5707      }
5708    }
5709
5710    // Add a floating point element at Offset.
5711    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
5712      // Unaligned floats are treated as integers.
5713      if (Offset % Bits)
5714        return;
5715      // The InReg flag is only required if there are any floats < 64 bits.
5716      if (Bits < 64)
5717        InReg = true;
5718      pad(Offset);
5719      Elems.push_back(Ty);
5720      Size = Offset + Bits;
5721    }
5722
5723    // Add a struct type to the coercion type, starting at Offset (in bits).
5724    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
5725      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
5726      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
5727        llvm::Type *ElemTy = StrTy->getElementType(i);
5728        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
5729        switch (ElemTy->getTypeID()) {
5730        case llvm::Type::StructTyID:
5731          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
5732          break;
5733        case llvm::Type::FloatTyID:
5734          addFloat(ElemOffset, ElemTy, 32);
5735          break;
5736        case llvm::Type::DoubleTyID:
5737          addFloat(ElemOffset, ElemTy, 64);
5738          break;
5739        case llvm::Type::FP128TyID:
5740          addFloat(ElemOffset, ElemTy, 128);
5741          break;
5742        case llvm::Type::PointerTyID:
5743          if (ElemOffset % 64 == 0) {
5744            pad(ElemOffset);
5745            Elems.push_back(ElemTy);
5746            Size += 64;
5747          }
5748          break;
5749        default:
5750          break;
5751        }
5752      }
5753    }
5754
5755    // Check if Ty is a usable substitute for the coercion type.
5756    bool isUsableType(llvm::StructType *Ty) const {
5757      if (Ty->getNumElements() != Elems.size())
5758        return false;
5759      for (unsigned i = 0, e = Elems.size(); i != e; ++i)
5760        if (Elems[i] != Ty->getElementType(i))
5761          return false;
5762      return true;
5763    }
5764
    // Get the coercion type: the single element if there is only one,
    // otherwise a literal struct type.
5766    llvm::Type *getType() const {
5767      if (Elems.size() == 1)
5768        return Elems.front();
5769      else
5770        return llvm::StructType::get(Context, Elems);
5771    }
5772  };
5773};
5774} // end anonymous namespace
5775
5776ABIArgInfo
5777SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
5778  if (Ty->isVoidType())
5779    return ABIArgInfo::getIgnore();
5780
5781  uint64_t Size = getContext().getTypeSize(Ty);
5782
5783  // Anything too big to fit in registers is passed with an explicit indirect
5784  // pointer / sret pointer.
5785  if (Size > SizeLimit)
5786    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5787
5788  // Treat an enum type as its underlying type.
5789  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5790    Ty = EnumTy->getDecl()->getIntegerType();
5791
5792  // Integer types smaller than a register are extended.
5793  if (Size < 64 && Ty->isIntegerType())
5794    return ABIArgInfo::getExtend();
5795
5796  // Other non-aggregates go in registers.
5797  if (!isAggregateTypeForABI(Ty))
5798    return ABIArgInfo::getDirect();
5799
5800  // If a C++ object has either a non-trivial copy constructor or a non-trivial
5801  // destructor, it is passed with an explicit indirect pointer / sret pointer.
5802  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5803    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5804
5805  // This is a small aggregate type that should be passed in registers.
5806  // Build a coercion type from the LLVM struct type.
5807  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
5808  if (!StrTy)
5809    return ABIArgInfo::getDirect();
5810
5811  CoerceBuilder CB(getVMContext(), getDataLayout());
5812  CB.addStruct(0, StrTy);
5813  CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
5814
5815  // Try to use the original type for coercion.
5816  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
5817
5818  if (CB.InReg)
5819    return ABIArgInfo::getDirectInReg(CoerceTy);
5820  else
5821    return ABIArgInfo::getDirect(CoerceTy);
5822}
5823
5824llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5825                                       CodeGenFunction &CGF) const {
5826  ABIArgInfo AI = classifyType(Ty, 16 * 8);
5827  llvm::Type *ArgTy = CGT.ConvertType(Ty);
5828  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
5829    AI.setCoerceToType(ArgTy);
5830
5831  llvm::Type *BPP = CGF.Int8PtrPtrTy;
5832  CGBuilderTy &Builder = CGF.Builder;
5833  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
5834  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5835  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
5836  llvm::Value *ArgAddr;
5837  unsigned Stride;
5838
5839  switch (AI.getKind()) {
5840  case ABIArgInfo::Expand:
5841  case ABIArgInfo::InAlloca:
5842    llvm_unreachable("Unsupported ABI kind for va_arg");
5843
5844  case ABIArgInfo::Extend:
5845    Stride = 8;
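    // On big-endian SPARC the low-order bytes of an extended value sit at the
    // end of its 8-byte slot, so step past the leading bytes to read it with
    // the narrower type.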
5846    ArgAddr = Builder
5847      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
5848                          "extend");
5849    break;
5850
5851  case ABIArgInfo::Direct:
5852    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
5853    ArgAddr = Addr;
5854    break;
5855
5856  case ABIArgInfo::Indirect:
5857    Stride = 8;
5858    ArgAddr = Builder.CreateBitCast(Addr,
5859                                    llvm::PointerType::getUnqual(ArgPtrTy),
5860                                    "indirect");
5861    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
5862    break;
5863
5864  case ABIArgInfo::Ignore:
5865    return llvm::UndefValue::get(ArgPtrTy);
5866  }
5867
5868  // Update VAList.
5869  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
5870  Builder.CreateStore(Addr, VAListAddrAsBPP);
5871
5872  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
5873}
5874
5875void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
5876  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
5877  for (auto &I : FI.arguments())
5878    I.info = classifyType(I.type, 16 * 8);
5879}
5880
5881namespace {
5882class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
5883public:
5884  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
5885    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
5886
5887  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5888    return 14;
5889  }
5890
5891  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5892                               llvm::Value *Address) const override;
5893};
5894} // end anonymous namespace
5895
5896bool
5897SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5898                                                llvm::Value *Address) const {
5899  // This is calculated from the LLVM and GCC tables and verified
5900  // against gcc output.  AFAIK all ABIs use the same encoding.
5901
5902  CodeGen::CGBuilderTy &Builder = CGF.Builder;
5903
5904  llvm::IntegerType *i8 = CGF.Int8Ty;
5905  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
5906  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
5907
5908  // 0-31: the 8-byte general-purpose registers
5909  AssignToArrayRange(Builder, Address, Eight8, 0, 31);
5910
5911  // 32-63: f0-31, the 4-byte floating-point registers
5912  AssignToArrayRange(Builder, Address, Four8, 32, 63);
5913
5914  //   Y   = 64
5915  //   PSR = 65
5916  //   WIM = 66
5917  //   TBR = 67
5918  //   PC  = 68
5919  //   NPC = 69
5920  //   FSR = 70
5921  //   CSR = 71
5922  AssignToArrayRange(Builder, Address, Eight8, 64, 71);
5923
5924  // 72-87: d0-15, the 8-byte floating-point registers
5925  AssignToArrayRange(Builder, Address, Eight8, 72, 87);
5926
5927  return false;
5928}
5929
5930
5931//===----------------------------------------------------------------------===//
5932// XCore ABI Implementation
5933//===----------------------------------------------------------------------===//
5934
5935namespace {
5936
5937/// A SmallStringEnc instance is used to build up the TypeString by passing
5938/// it by reference between functions that append to it.
5939typedef llvm::SmallString<128> SmallStringEnc;
5940
5941/// TypeStringCache caches the meta encodings of Types.
5942///
/// The reason for caching TypeStrings is twofold:
5944///   1. To cache a type's encoding for later uses;
5945///   2. As a means to break recursive member type inclusion.
5946///
5947/// A cache Entry can have a Status of:
5948///   NonRecursive:   The type encoding is not recursive;
5949///   Recursive:      The type encoding is recursive;
5950///   Incomplete:     An incomplete TypeString;
5951///   IncompleteUsed: An incomplete TypeString that has been used in a
5952///                   Recursive type encoding.
5953///
5954/// A NonRecursive entry will have all of its sub-members expanded as fully
5955/// as possible. Whilst it may contain types which are recursive, the type
5956/// itself is not recursive and thus its encoding may be safely used whenever
5957/// the type is encountered.
5958///
5959/// A Recursive entry will have all of its sub-members expanded as fully as
5960/// possible. The type itself is recursive and it may contain other types which
5961/// are recursive. The Recursive encoding must not be used during the expansion
5962/// of a recursive type's recursive branch. For simplicity the code uses
5963/// IncompleteCount to reject all usage of Recursive encodings for member types.
5964///
5965/// An Incomplete entry is always a RecordType and only encodes its
5966/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
5967/// are placed into the cache during type expansion as a means to identify and
5968/// handle recursive inclusion of types as sub-members. If there is recursion
5969/// the entry becomes IncompleteUsed.
5970///
5971/// During the expansion of a RecordType's members:
5972///
5973///   If the cache contains a NonRecursive encoding for the member type, the
5974///   cached encoding is used;
5975///
5976///   If the cache contains a Recursive encoding for the member type, the
5977///   cached encoding is 'Swapped' out, as it may be incorrect, and...
5978///
5979///   If the member is a RecordType, an Incomplete encoding is placed into the
5980///   cache to break potential recursive inclusion of itself as a sub-member;
5981///
5982///   Once a member RecordType has been expanded, its temporary incomplete
5983///   entry is removed from the cache. If a Recursive encoding was swapped out
5984///   it is swapped back in;
5985///
5986///   If an incomplete entry is used to expand a sub-member, the incomplete
5987///   entry is marked as IncompleteUsed. The cache keeps count of how many
5988///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
5989///
5990///   If a member's encoding is found to be a NonRecursive or Recursive viz:
5991///   IncompleteUsedCount==0, the member's encoding is added to the cache.
5992///   Else the member is part of a recursive type and thus the recursion has
5993///   been exited too soon for the encoding to be correct for the member.
5994///
5995class TypeStringCache {
5996  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
5997  struct Entry {
5998    std::string Str;     // The encoded TypeString for the type.
5999    enum Status State;   // Information about the encoding in 'Str'.
6000    std::string Swapped; // A temporary place holder for a Recursive encoding
6001                         // during the expansion of RecordType's members.
6002  };
6003  std::map<const IdentifierInfo *, struct Entry> Map;
6004  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
6005  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
6006public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
6008  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
6009  bool removeIncomplete(const IdentifierInfo *ID);
6010  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
6011                     bool IsRecursive);
6012  StringRef lookupStr(const IdentifierInfo *ID);
6013};
6014
/// TypeString encodings for enum & union fields must be ordered.
6016/// FieldEncoding is a helper for this ordering process.
6017class FieldEncoding {
6018  bool HasName;
6019  std::string Enc;
6020public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc.c_str(); }
6023  bool operator<(const FieldEncoding &rhs) const {
6024    if (HasName != rhs.HasName) return HasName;
6025    return Enc < rhs.Enc;
6026  }
6027};
6028
6029class XCoreABIInfo : public DefaultABIInfo {
6030public:
6031  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6032  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6033                         CodeGenFunction &CGF) const override;
6034};
6035
6036class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
6037  mutable TypeStringCache TSC;
6038public:
6039  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
6040    :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
6041  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6042                    CodeGen::CodeGenModule &M) const override;
6043};
6044
6045} // End anonymous namespace.
6046
6047llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6048                                     CodeGenFunction &CGF) const {
6049  CGBuilderTy &Builder = CGF.Builder;
6050
6051  // Get the VAList.
6052  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
6053                                                       CGF.Int8PtrPtrTy);
6054  llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
6055
6056  // Handle the argument.
6057  ABIArgInfo AI = classifyArgumentType(Ty);
6058  llvm::Type *ArgTy = CGT.ConvertType(Ty);
6059  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6060    AI.setCoerceToType(ArgTy);
6061  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6062  llvm::Value *Val;
6063  uint64_t ArgSize = 0;
6064  switch (AI.getKind()) {
6065  case ABIArgInfo::Expand:
6066  case ABIArgInfo::InAlloca:
6067    llvm_unreachable("Unsupported ABI kind for va_arg");
6068  case ABIArgInfo::Ignore:
6069    Val = llvm::UndefValue::get(ArgPtrTy);
6070    ArgSize = 0;
6071    break;
6072  case ABIArgInfo::Extend:
6073  case ABIArgInfo::Direct:
6074    Val = Builder.CreatePointerCast(AP, ArgPtrTy);
6075    ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
6076    if (ArgSize < 4)
6077      ArgSize = 4;
6078    break;
6079  case ABIArgInfo::Indirect:
6080    llvm::Value *ArgAddr;
6081    ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
6082    ArgAddr = Builder.CreateLoad(ArgAddr);
6083    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
6084    ArgSize = 4;
6085    break;
6086  }
6087
6088  // Increment the VAList.
6089  if (ArgSize) {
6090    llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
6091    Builder.CreateStore(APN, VAListAddrAsBPP);
6092  }
6093  return Val;
6094}
6095
6096/// During the expansion of a RecordType, an incomplete TypeString is placed
6097/// into the cache as a means to identify and break recursion.
6098/// If there is a Recursive encoding in the cache, it is swapped out and will
6099/// be reinserted by removeIncomplete().
/// Any other kind of cached encoding should have been used instead of
/// arriving here.
6101void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
6102                                    std::string StubEnc) {
6103  if (!ID)
6104    return;
6105  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
6108  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
6109  E.Swapped.swap(E.Str); // swap out the Recursive
6110  E.Str.swap(StubEnc);
6111  E.State = Incomplete;
6112  ++IncompleteCount;
6113}
6114
6115/// Once the RecordType has been expanded, the temporary incomplete TypeString
6116/// must be removed from the cache.
6117/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
6118/// Returns true if the RecordType was defined recursively.
6119bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
6120  if (!ID)
6121    return false;
6122  auto I = Map.find(ID);
6123  assert(I != Map.end() && "Entry not present");
6124  Entry &E = I->second;
  assert((E.State == Incomplete || E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
6128  bool IsRecursive = false;
6129  if (E.State == IncompleteUsed) {
6130    // We made use of our Incomplete encoding, thus we are recursive.
6131    IsRecursive = true;
6132    --IncompleteUsedCount;
6133  }
6134  if (E.Swapped.empty())
6135    Map.erase(I);
6136  else {
6137    // Swap the Recursive back.
6138    E.Swapped.swap(E.Str);
6139    E.Swapped.clear();
6140    E.State = Recursive;
6141  }
6142  --IncompleteCount;
6143  return IsRecursive;
6144}
6145
6146/// Add the encoded TypeString to the cache only if it is NonRecursive or
6147/// Recursive (viz: all sub-members were expanded as fully as possible).
6148void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
6149                                    bool IsRecursive) {
6150  if (!ID || IncompleteUsedCount)
    return; // No key, or it is an incomplete sub-type, so don't add.
6152  Entry &E = Map[ID];
6153  if (IsRecursive && !E.Str.empty()) {
6154    assert(E.State==Recursive && E.Str.size() == Str.size() &&
6155           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry, but we assumed the worst when we
    // started, viz: IncompleteCount != 0.
6159    return;
6160  }
6161  assert(E.Str.empty() && "Entry already present");
6162  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
6164}
6165
6166/// Return a cached TypeString encoding for the ID. If there isn't one, or we
6167/// are recursively expanding a type (IncompleteCount != 0) and the cached
6168/// encoding is Recursive, return an empty StringRef.
6169StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
6170  if (!ID)
6171    return StringRef();   // We have no key.
6172  auto I = Map.find(ID);
6173  if (I == Map.end())
6174    return StringRef();   // We have no encoding.
6175  Entry &E = I->second;
6176  if (E.State == Recursive && IncompleteCount)
6177    return StringRef();   // We don't use Recursive encodings for member types.
6178
6179  if (E.State == Incomplete) {
6180    // The incomplete type is being used to break out of recursion.
6181    E.State = IncompleteUsed;
6182    ++IncompleteUsedCount;
6183  }
6184  return E.Str.c_str();
6185}
6186
6187/// The XCore ABI includes a type information section that communicates symbol
6188/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
6192/// symbols: definitions, declarations, functions & variables.
6193///
6194/// The TypeString carries type, qualifier, name, size & value details.
6195/// Please see 'Tools Development Guide' section 2.16.2 for format details:
6196/// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf>
6197/// The output is tested by test/CodeGen/xcore-stringtype.c.
6198///
6199static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6200                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
6201
6202/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
6203void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6204                                          CodeGen::CodeGenModule &CGM) const {
6205  SmallStringEnc Enc;
6206  if (getTypeString(Enc, D, CGM, TSC)) {
6207    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
6208    llvm::SmallVector<llvm::Value *, 2> MDVals;
6209    MDVals.push_back(GV);
6210    MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
6211    llvm::NamedMDNode *MD =
6212      CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
6213    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6214  }
6215}
6216
6217static bool appendType(SmallStringEnc &Enc, QualType QType,
6218                       const CodeGen::CodeGenModule &CGM,
6219                       TypeStringCache &TSC);
6220
6221/// Helper function for appendRecordType().
6222/// Builds a SmallVector containing the encoded field types in declaration order.
6223static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
6224                             const RecordDecl *RD,
6225                             const CodeGen::CodeGenModule &CGM,
6226                             TypeStringCache &TSC) {
6227  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
6228       I != E; ++I) {
6229    SmallStringEnc Enc;
6230    Enc += "m(";
6231    Enc += I->getName();
6232    Enc += "){";
6233    if (I->isBitField()) {
6234      Enc += "b(";
6235      llvm::raw_svector_ostream OS(Enc);
6236      OS.resync();
6237      OS << I->getBitWidthValue(CGM.getContext());
6238      OS.flush();
6239      Enc += ':';
6240    }
6241    if (!appendType(Enc, I->getType(), CGM, TSC))
6242      return false;
6243    if (I->isBitField())
6244      Enc += ')';
6245    Enc += '}';
6246    FE.push_back(FieldEncoding(!I->getName().empty(), Enc));
6247  }
6248  return true;
6249}
6250
6251/// Appends structure and union types to Enc and adds encoding to cache.
6252/// Recursively calls appendType (via extractFieldType) for each field.
6253/// Union types have their fields ordered according to the ABI.
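/// For illustration (a hypothetical C struct): 'struct Point { int x; int y; }'
/// would be encoded as "s(Point){m(x){si},m(y){si}}".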
6254static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
6255                             const CodeGen::CodeGenModule &CGM,
6256                             TypeStringCache &TSC, const IdentifierInfo *ID) {
6257  // Append the cached TypeString if we have one.
6258  StringRef TypeString = TSC.lookupStr(ID);
6259  if (!TypeString.empty()) {
6260    Enc += TypeString;
6261    return true;
6262  }
6263
6264  // Start to emit an incomplete TypeString.
6265  size_t Start = Enc.size();
6266  Enc += (RT->isUnionType()? 'u' : 's');
6267  Enc += '(';
6268  if (ID)
6269    Enc += ID->getName();
6270  Enc += "){";
6271
6272  // We collect all encoded fields and order as necessary.
6273  bool IsRecursive = false;
6274  const RecordDecl *RD = RT->getDecl()->getDefinition();
6275  if (RD && !RD->field_empty()) {
6276    // An incomplete TypeString stub is placed in the cache for this RecordType
6277    // so that recursive calls to this RecordType will use it whilst building a
6278    // complete TypeString for this RecordType.
6279    SmallVector<FieldEncoding, 16> FE;
6280    std::string StubEnc(Enc.substr(Start).str());
6281    StubEnc += '}';  // StubEnc now holds a valid incomplete TypeString.
6282    TSC.addIncomplete(ID, std::move(StubEnc));
6283    if (!extractFieldType(FE, RD, CGM, TSC)) {
6284      (void) TSC.removeIncomplete(ID);
6285      return false;
6286    }
6287    IsRecursive = TSC.removeIncomplete(ID);
6288    // The ABI requires unions to be sorted but not structures.
6289    // See FieldEncoding::operator< for sort algorithm.
6290    if (RT->isUnionType())
6291      std::sort(FE.begin(), FE.end());
6292    // We can now complete the TypeString.
6293    unsigned E = FE.size();
6294    for (unsigned I = 0; I != E; ++I) {
6295      if (I)
6296        Enc += ',';
6297      Enc += FE[I].str();
6298    }
6299  }
6300  Enc += '}';
6301  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
6302  return true;
6303}
6304
6305/// Appends enum types to Enc and adds the encoding to the cache.
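/// For illustration (a hypothetical C enum): 'enum E { A, B }' would be
/// encoded as "e(E){m(A){0},m(B){1}}".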
6306static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
6307                           TypeStringCache &TSC,
6308                           const IdentifierInfo *ID) {
6309  // Append the cached TypeString if we have one.
6310  StringRef TypeString = TSC.lookupStr(ID);
6311  if (!TypeString.empty()) {
6312    Enc += TypeString;
6313    return true;
6314  }
6315
6316  size_t Start = Enc.size();
6317  Enc += "e(";
6318  if (ID)
6319    Enc += ID->getName();
6320  Enc += "){";
6321
6322  // We collect all encoded enumerations and order them alphanumerically.
6323  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
6324    SmallVector<FieldEncoding, 16> FE;
6325    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
6326         ++I) {
6327      SmallStringEnc EnumEnc;
6328      EnumEnc += "m(";
6329      EnumEnc += I->getName();
6330      EnumEnc += "){";
6331      I->getInitVal().toString(EnumEnc);
6332      EnumEnc += '}';
6333      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
6334    }
6335    std::sort(FE.begin(), FE.end());
6336    unsigned E = FE.size();
6337    for (unsigned I = 0; I != E; ++I) {
6338      if (I)
6339        Enc += ',';
6340      Enc += FE[I].str();
6341    }
6342  }
6343  Enc += '}';
6344  TSC.addIfComplete(ID, Enc.substr(Start), false);
6345  return true;
6346}
6347
6348/// Appends type's qualifier to Enc.
6349/// This is done prior to appending the type's encoding.
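/// For illustration: the qualifiers of a 'const volatile int' would produce
/// the prefix "cv:", to which appendType() then appends the builtin
/// encoding "si".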
6350static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
6351  // Qualifiers are emitted in alphabetical order.
6352  static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
6353  int Lookup = 0;
6354  if (QT.isConstQualified())
6355    Lookup += 1<<0;
6356  if (QT.isRestrictQualified())
6357    Lookup += 1<<1;
6358  if (QT.isVolatileQualified())
6359    Lookup += 1<<2;
6360  Enc += Table[Lookup];
6361}
6362
6363/// Appends built-in types to Enc.
6364static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
6365  const char *EncType;
6366  switch (BT->getKind()) {
6367    case BuiltinType::Void:
6368      EncType = "0";
6369      break;
6370    case BuiltinType::Bool:
6371      EncType = "b";
6372      break;
6373    case BuiltinType::Char_U:
6374      EncType = "uc";
6375      break;
6376    case BuiltinType::UChar:
6377      EncType = "uc";
6378      break;
6379    case BuiltinType::SChar:
6380      EncType = "sc";
6381      break;
6382    case BuiltinType::UShort:
6383      EncType = "us";
6384      break;
6385    case BuiltinType::Short:
6386      EncType = "ss";
6387      break;
6388    case BuiltinType::UInt:
6389      EncType = "ui";
6390      break;
6391    case BuiltinType::Int:
6392      EncType = "si";
6393      break;
6394    case BuiltinType::ULong:
6395      EncType = "ul";
6396      break;
6397    case BuiltinType::Long:
6398      EncType = "sl";
6399      break;
6400    case BuiltinType::ULongLong:
6401      EncType = "ull";
6402      break;
6403    case BuiltinType::LongLong:
6404      EncType = "sll";
6405      break;
6406    case BuiltinType::Float:
6407      EncType = "ft";
6408      break;
6409    case BuiltinType::Double:
6410      EncType = "d";
6411      break;
6412    case BuiltinType::LongDouble:
6413      EncType = "ld";
6414      break;
6415    default:
6416      return false;
6417  }
6418  Enc += EncType;
6419  return true;
6420}
6421
6422/// Appends a pointer encoding to Enc before calling appendType for the pointee.
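/// For illustration: an unqualified 'int *' would be encoded as "p(si)".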
6423static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
6424                              const CodeGen::CodeGenModule &CGM,
6425                              TypeStringCache &TSC) {
6426  Enc += "p(";
6427  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
6428    return false;
6429  Enc += ')';
6430  return true;
6431}
6432
6433/// Appends array encoding to Enc before calling appendType for the element.
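/// For illustration: an 'int[4]' would be encoded as "a(4:si)"; a global
/// array of unknown size uses NoSizeEnc instead, e.g. "a(*:si)".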
6434static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
6435                            const ArrayType *AT,
6436                            const CodeGen::CodeGenModule &CGM,
6437                            TypeStringCache &TSC, StringRef NoSizeEnc) {
6438  if (AT->getSizeModifier() != ArrayType::Normal)
6439    return false;
6440  Enc += "a(";
6441  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
6442    CAT->getSize().toStringUnsigned(Enc);
6443  else
6444    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
6445  Enc += ':';
6446  // The Qualifiers should be attached to the type rather than the array.
6447  appendQualifier(Enc, QT);
6448  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
6449    return false;
6450  Enc += ')';
6451  return true;
6452}
6453
6454/// Appends a function encoding to Enc, calling appendType for the return type
6455/// and the arguments.
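/// For illustration (hypothetical C functions): 'int f(float, ...)' would be
/// encoded as "f{si}(ft,va)" and 'void f(void)' as "f{0}(0)".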
6456static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
6457                             const CodeGen::CodeGenModule &CGM,
6458                             TypeStringCache &TSC) {
6459  Enc += "f{";
6460  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
6461    return false;
6462  Enc += "}(";
6463  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
6464    // N.B. we are only interested in the adjusted param types.
6465    auto I = FPT->param_type_begin();
6466    auto E = FPT->param_type_end();
6467    if (I != E) {
6468      do {
6469        if (!appendType(Enc, *I, CGM, TSC))
6470          return false;
6471        ++I;
6472        if (I != E)
6473          Enc += ',';
6474      } while (I != E);
6475      if (FPT->isVariadic())
6476        Enc += ",va";
6477    } else {
6478      if (FPT->isVariadic())
6479        Enc += "va";
6480      else
6481        Enc += '0';
6482    }
6483  }
6484  Enc += ')';
6485  return true;
6486}
6487
6488/// Handles the type's qualifier before dispatching a call to handle specific
6489/// type encodings.
6490static bool appendType(SmallStringEnc &Enc, QualType QType,
6491                       const CodeGen::CodeGenModule &CGM,
6492                       TypeStringCache &TSC) {
6493
6494  QualType QT = QType.getCanonicalType();
6495
6496  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
6497    // The Qualifiers should be attached to the type rather than the array.
6498    // Thus we don't call appendQualifier() here.
6499    return appendArrayType(Enc, QT, AT, CGM, TSC, "");
6500
6501  appendQualifier(Enc, QT);
6502
6503  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
6504    return appendBuiltinType(Enc, BT);
6505
6506  if (const PointerType *PT = QT->getAs<PointerType>())
6507    return appendPointerType(Enc, PT, CGM, TSC);
6508
6509  if (const EnumType *ET = QT->getAs<EnumType>())
6510    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
6511
6512  if (const RecordType *RT = QT->getAsStructureType())
6513    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
6514
6515  if (const RecordType *RT = QT->getAsUnionType())
6516    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
6517
6518  if (const FunctionType *FT = QT->getAs<FunctionType>())
6519    return appendFunctionType(Enc, FT, CGM, TSC);
6520
6521  return false;
6522}
6523
6524static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6525                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
6526  if (!D)
6527    return false;
6528
6529  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
6530    if (FD->getLanguageLinkage() != CLanguageLinkage)
6531      return false;
6532    return appendType(Enc, FD->getType(), CGM, TSC);
6533  }
6534
6535  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
6536    if (VD->getLanguageLinkage() != CLanguageLinkage)
6537      return false;
6538    QualType QT = VD->getType().getCanonicalType();
6539    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
6540      // Global ArrayTypes are given a size of '*' if the size is unknown.
6541      // The Qualifiers should be attached to the type rather than the array.
6542      // Thus we don't call appendQualifier() here.
6543      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
6544    }
6545    return appendType(Enc, QT, CGM, TSC);
6546  }
6547  return false;
6548}
6549
6550
6551//===----------------------------------------------------------------------===//
6552// Driver code
6553//===----------------------------------------------------------------------===//
6554
6555const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
6556  if (TheTargetCodeGenInfo)
6557    return *TheTargetCodeGenInfo;
6558
6559  const llvm::Triple &Triple = getTarget().getTriple();
6560  switch (Triple.getArch()) {
6561  default:
6562    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
6563
6564  case llvm::Triple::le32:
6565    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
6566  case llvm::Triple::mips:
6567  case llvm::Triple::mipsel:
6568    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
6569
6570  case llvm::Triple::mips64:
6571  case llvm::Triple::mips64el:
6572    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
6573
6574  case llvm::Triple::aarch64:
6575  case llvm::Triple::aarch64_be:
6576  case llvm::Triple::arm64:
6577  case llvm::Triple::arm64_be: {
6578    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
6579    if (getTarget().getABI() == "darwinpcs")
6580      Kind = AArch64ABIInfo::DarwinPCS;
6581
6582    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
6583  }
6584
6585  case llvm::Triple::arm:
6586  case llvm::Triple::armeb:
6587  case llvm::Triple::thumb:
6588  case llvm::Triple::thumbeb:
6589    {
6590      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
6591      if (getTarget().getABI() == "apcs-gnu")
6592        Kind = ARMABIInfo::APCS;
6593      else if (CodeGenOpts.FloatABI == "hard" ||
6594               (CodeGenOpts.FloatABI != "soft" &&
6595                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
6596        Kind = ARMABIInfo::AAPCS_VFP;
6597
6598      switch (Triple.getOS()) {
6599        case llvm::Triple::NaCl:
6600          return *(TheTargetCodeGenInfo =
6601                   new NaClARMTargetCodeGenInfo(Types, Kind));
6602        default:
6603          return *(TheTargetCodeGenInfo =
6604                   new ARMTargetCodeGenInfo(Types, Kind));
6605      }
6606    }
6607
6608  case llvm::Triple::ppc:
6609    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
6610  case llvm::Triple::ppc64:
6611    if (Triple.isOSBinFormatELF())
6612      return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
6613    else
6614      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
6615  case llvm::Triple::ppc64le:
6616    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
6617    return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
6618
6619  case llvm::Triple::nvptx:
6620  case llvm::Triple::nvptx64:
6621    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
6622
6623  case llvm::Triple::msp430:
6624    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
6625
6626  case llvm::Triple::systemz:
6627    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
6628
6629  case llvm::Triple::tce:
6630    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
6631
6632  case llvm::Triple::x86: {
6633    bool IsDarwinVectorABI = Triple.isOSDarwin();
6634    bool IsSmallStructInRegABI =
6635        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
6636    bool IsWin32FloatStructABI = Triple.isWindowsMSVCEnvironment();
6637
6638    if (Triple.getOS() == llvm::Triple::Win32) {
6639      return *(TheTargetCodeGenInfo =
6640               new WinX86_32TargetCodeGenInfo(Types,
6641                                              IsDarwinVectorABI, IsSmallStructInRegABI,
6642                                              IsWin32FloatStructABI,
6643                                              CodeGenOpts.NumRegisterParameters));
6644    } else {
6645      return *(TheTargetCodeGenInfo =
6646               new X86_32TargetCodeGenInfo(Types,
6647                                           IsDarwinVectorABI, IsSmallStructInRegABI,
6648                                           IsWin32FloatStructABI,
6649                                           CodeGenOpts.NumRegisterParameters));
6650    }
6651  }
6652
6653  case llvm::Triple::x86_64: {
6654    bool HasAVX = getTarget().getABI() == "avx";
6655
6656    switch (Triple.getOS()) {
6657    case llvm::Triple::Win32:
6658      return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
6659    case llvm::Triple::NaCl:
6660      return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
6661                                                                      HasAVX));
6662    default:
6663      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
6664                                                                  HasAVX));
6665    }
6666  }
6667  case llvm::Triple::hexagon:
6668    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
6669  case llvm::Triple::sparcv9:
6670    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
6671  case llvm::Triple::xcore:
6672    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
6673  }
6674}
6675