TargetInfo.cpp revision eeb00624413d4a4856e66809b84c558d2cdce17f
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

static bool isRecordReturnIndirect(const RecordType *RT,
                                   CodeGen::CodeGenTypes &CGT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;
  return CGT.CGM.getCXXABI().isReturnTypeIndirect(RD);
}

static bool isRecordReturnIndirect(QualType T, CodeGen::CodeGenTypes &CGT) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  return isRecordReturnIndirect(RT, CGT);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CodeGen::CodeGenTypes &CGT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CGT.CGM.getCXXABI().getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CodeGen::CodeGenTypes &CGT) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CGT);
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
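
// For reference, ABIArgInfo::dump() above prints one-line summaries of the
// form sketched here (spacing read off the stream operations, so roughly):
//   (ABIArgInfo Kind=Direct Type=i32)
//   (ABIArgInfo Kind=Indirect Align=4 ByVal=1 Realign=0)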

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                      const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}
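
// An illustrative sketch (not exhaustive): with AllowArrays set, a C struct
// such as
//   struct Empty { struct {} dummy[4]; int : 0; };
// contains only an array of empty records and an unnamed bit-field, so
// isEmptyRecord() would return true for it, letting the ABIs below ignore
// arguments of such types entirely.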

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}
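
// A sketch of what this accepts: for
//   struct Inner { double d; };
//   struct Outer { struct Inner i[1]; };
// isSingleElementStruct() would return the 'double' type, since the
// single-element array and the wrapper struct are both looked through;
// adding a second non-empty field to either struct makes it return null.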

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it
/// was expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
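
// Roughly, this lets a C struct such as
//   struct Point { int x; int y; };
// be passed as two separate i32 arguments instead of as a byval pointer,
// while a struct containing, say, a char or a bit-field fails the checks
// above and stays indirect.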

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non trivial destructors/constructors should not be passed
    // by value.
    if (isRecordReturnIndirect(Ty, CGT))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return 0;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}
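
// To make the predicate above concrete: <2 x i32>, <4 x i16>, and <8 x i8>
// all satisfy IsX86_MMXType (64 bits, integer elements narrower than 64),
// while <1 x i64> and <2 x float> do not. Likewise, a "y" asm operand of a
// 64-bit vector type is rewritten to the x86_mmx IR type here.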

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
                               unsigned &FreeRegs) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs,
                                  bool IsFastCall) const;
  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
                      bool IsFastCall, bool &NeedsPadding) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are returned in a register if all of the fields would
  // be returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}
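
// As a rough example of the recursion above: on an ABI that returns small
// structs in registers, a type like
//   struct Pair { short lo; short hi; };   // 32 bits, two integer fields
// passes every check here, whereas struct { float f[3]; } is rejected up
// front because 96 bits is not a register size.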

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      if (isRecordReturnIndirect(RT, CGT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
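
// Sketching the special case above: returning
//   struct D { double d; };
// comes back as a direct 'double' when IsWin32StructABI is false, but as a
// 64-bit integer under the Win32 struct ABI, since MSVC does not look
// through single-element structs here.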

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isRecordWithSSEVectorType(Context, i->getType()))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            unsigned &FreeRegs) const {
  if (!ByVal) {
    if (FreeRegs) {
      --FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}
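
// A worked example of the byval alignment logic, assuming the Darwin vector
// ABI: for
//   struct V { __m128 v; };   // 16-byte aligned, contains an SSE vector
// getIndirectResult() produces Indirect Align=16 ByVal=1, while an ordinary
// struct of ints gets the fixed 4-byte stack alignment.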

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
                                   bool IsFastCall, bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return false;
  }

  FreeRegs -= SizeInRegs;

  if (IsFastCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &FreeRegs,
                                               bool IsFastCall) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, FreeRegs);

      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
        return getIndirectResult(Ty, RAA == CGCXXABI::RAA_DirectInMemory,
                                 FreeRegs);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, FreeRegs);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements;
      for (unsigned I = 0; I < SizeInRegs; ++I)
        Elements.push_back(Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);

    return getIndirectResult(Ty, true, FreeRegs);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned CC = FI.getCallingConvention();
  bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
  unsigned FreeRegs;
  if (IsFastCall)
    FreeRegs = 2;
  else if (FI.getHasRegParm())
    FreeRegs = FI.getRegParm();
  else
    FreeRegs = DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
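
// Working through the alignment expression above with Align == 16 and a
// current va_list pointer of, say, 0x1004:
//   addr = (0x1004 + 15) & -16 = 0x1013 & 0xfffffff0 = 0x1010
// so the next 16-byte-aligned slot is used, and ap.next then advances by the
// type size rounded up to that same alignment.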

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
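
// In source terms, the attribute handled above corresponds to declaring
//   void f(void) __attribute__((force_align_arg_pointer));
// which makes the emitted IR function carry alignstack(16), so the backend
// realigns the stack on entry.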

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a
  /// suitable result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
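
// Some representative (Lo, Hi) pairs produced by classify(), to make the
// scheme above concrete:
//   int                      -> (Integer, NoClass)
//   double                   -> (SSE,     NoClass)
//   long double              -> (X87,     X87Up)
//   __int128                 -> (Integer, Integer)
//   struct { double a, b; }  -> (SSE,     SSE)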

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty, bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

};

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix. This
  // matches the behavior of MSVC.
  std::string ArgStr = Lib;
  if (Lib.size() <= 4 ||
      Lib.substr(Lib.size() - 4).compare_lower(".lib") != 0) {
    ArgStr += ".lib";
  }
  return ArgStr;
}
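
// For example, a dependent library named "kernel32" becomes the linker
// option "/DEFAULTLIB:kernel32.lib" via the hooks below, while a name
// already ending in ".lib" (any case) is left unchanged.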

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned RegParms)
    : X86_32TargetCodeGenInfo(CGT, false, true, true, RegParms) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //     the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //     class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  //     INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //     MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
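
// A short worked example of the merge rules: for
//   struct S { float f; int i; };
// both fields land in the same eightbyte; the float classifies as SSE, the
// int as INTEGER, and rule (d) merges them to INTEGER, so the struct is
// passed in a general-purpose register.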

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getTarget().getTriple().getOS() == llvm::Triple::NaCl)) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256 bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to
      // class SSEUP. The original Lo and Hi design considers that types
      // can't be greater than 128 bits, so a 64-bit split in Hi and Lo
      // makes sense. This design isn't correct for 256 bits, but since
      // there are no cases where the upper parts would need to be
      // inspected, avoid adding complexity and just consider Hi to match
      // the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getTarget().getTriple().getOS() == llvm::Triple::NaCl))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't
    // extended to work for sizes wider than 128, early check and fallback
    // to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
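
  // For instance, double[2] spans two eightbytes that each classify as SSE,
  // giving (Lo = SSE, Hi = SSE), while char[40] is larger than four
  // eightbytes and is left in the MEMORY class immediately.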

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, CGT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}
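
// To illustrate the unaligned-field rule in the record loop above: a packed
// struct such as
//   struct __attribute__((packed)) P { char c; double d; };
// places the double at bit offset 8, which is not a multiple of its
// alignment, so the whole struct is classified as MEMORY.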
1666 Class FieldLo, FieldHi; 1667 1668 // Bit-fields require special handling, they do not force the 1669 // structure to be passed in memory even if unaligned, and 1670 // therefore they can straddle an eightbyte. 1671 if (BitField) { 1672 // Ignore padding bit-fields. 1673 if (i->isUnnamedBitfield()) 1674 continue; 1675 1676 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1677 uint64_t Size = i->getBitWidthValue(getContext()); 1678 1679 uint64_t EB_Lo = Offset / 64; 1680 uint64_t EB_Hi = (Offset + Size - 1) / 64; 1681 FieldLo = FieldHi = NoClass; 1682 if (EB_Lo) { 1683 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 1684 FieldLo = NoClass; 1685 FieldHi = Integer; 1686 } else { 1687 FieldLo = Integer; 1688 FieldHi = EB_Hi ? Integer : NoClass; 1689 } 1690 } else 1691 classify(i->getType(), Offset, FieldLo, FieldHi); 1692 Lo = merge(Lo, FieldLo); 1693 Hi = merge(Hi, FieldHi); 1694 if (Lo == Memory || Hi == Memory) 1695 break; 1696 } 1697 1698 postMerge(Size, Lo, Hi); 1699 } 1700} 1701 1702ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 1703 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1704 // place naturally. 1705 if (!isAggregateTypeForABI(Ty)) { 1706 // Treat an enum type as its underlying type. 1707 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1708 Ty = EnumTy->getDecl()->getIntegerType(); 1709 1710 return (Ty->isPromotableIntegerType() ? 1711 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1712 } 1713 1714 return ABIArgInfo::getIndirect(0); 1715} 1716 1717bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 1718 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 1719 uint64_t Size = getContext().getTypeSize(VecTy); 1720 unsigned LargestVector = HasAVX ? 256 : 128; 1721 if (Size <= 64 || Size > LargestVector) 1722 return true; 1723 } 1724 1725 return false; 1726} 1727 1728ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 1729 unsigned freeIntRegs) const { 1730 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1731 // place naturally. 1732 // 1733 // This assumption is optimistic, as there could be free registers available 1734 // when we need to pass this argument in memory, and LLVM could try to pass 1735 // the argument in the free register. This does not seem to happen currently, 1736 // but this code would be much safer if we could mark the argument with 1737 // 'onstack'. See PR12193. 1738 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 1739 // Treat an enum type as its underlying type. 1740 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1741 Ty = EnumTy->getDecl()->getIntegerType(); 1742 1743 return (Ty->isPromotableIntegerType() ? 1744 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1745 } 1746 1747 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) 1748 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 1749 1750 // Compute the byval alignment. We specify the alignment of the byval in all 1751 // cases so that the mid-level optimizer knows the alignment of the byval. 1752 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 1753 1754 // Attempt to avoid passing indirect results using byval when possible. This 1755 // is important for good codegen. 1756 // 1757 // We do this by coercing the value into a scalar type which the backend can 1758 // handle naturally (i.e., without using byval). 
1759 // 1760 // For simplicity, we currently only do this when we have exhausted all of the 1761 // free integer registers. Doing this when there are free integer registers 1762 // would require more care, as we would have to ensure that the coerced value 1763 // did not claim the unused register. That would require either reording the 1764 // arguments to the function (so that any subsequent inreg values came first), 1765 // or only doing this optimization when there were no following arguments that 1766 // might be inreg. 1767 // 1768 // We currently expect it to be rare (particularly in well written code) for 1769 // arguments to be passed on the stack when there are still free integer 1770 // registers available (this would typically imply large structs being passed 1771 // by value), so this seems like a fair tradeoff for now. 1772 // 1773 // We can revisit this if the backend grows support for 'onstack' parameter 1774 // attributes. See PR12193. 1775 if (freeIntRegs == 0) { 1776 uint64_t Size = getContext().getTypeSize(Ty); 1777 1778 // If this type fits in an eightbyte, coerce it into the matching integral 1779 // type, which will end up on the stack (with alignment 8). 1780 if (Align == 8 && Size <= 64) 1781 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1782 Size)); 1783 } 1784 1785 return ABIArgInfo::getIndirect(Align); 1786} 1787 1788/// GetByteVectorType - The ABI specifies that a value should be passed in an 1789/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a 1790/// vector register. 1791llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 1792 llvm::Type *IRType = CGT.ConvertType(Ty); 1793 1794 // Wrapper structs that just contain vectors are passed just like vectors, 1795 // strip them off if present. 1796 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType); 1797 while (STy && STy->getNumElements() == 1) { 1798 IRType = STy->getElementType(0); 1799 STy = dyn_cast<llvm::StructType>(IRType); 1800 } 1801 1802 // If the preferred type is a 16-byte vector, prefer to pass it. 1803 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ 1804 llvm::Type *EltTy = VT->getElementType(); 1805 unsigned BitWidth = VT->getBitWidth(); 1806 if ((BitWidth >= 128 && BitWidth <= 256) && 1807 (EltTy->isFloatTy() || EltTy->isDoubleTy() || 1808 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || 1809 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || 1810 EltTy->isIntegerTy(128))) 1811 return VT; 1812 } 1813 1814 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1815} 1816 1817/// BitsContainNoUserData - Return true if the specified [start,end) bit range 1818/// is known to either be off the end of the specified type or being in 1819/// alignment padding. The user type specified is known to be at most 128 bits 1820/// in size, and have passed through X86_64ABIInfo::classify with a successful 1821/// classification that put one of the two halves in the INTEGER class. 1822/// 1823/// It is conservatively correct to return false. 1824static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 1825 unsigned EndBit, ASTContext &Context) { 1826 // If the bytes being queried are off the end of the type, there is no user 1827 // data hiding here. This handles analysis of builtins, vectors and other 1828 // types that don't contain interesting padding. 
1829 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1830 if (TySize <= StartBit) 1831 return true; 1832 1833 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1834 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1835 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1836 1837 // Check each element to see if the element overlaps with the queried range. 1838 for (unsigned i = 0; i != NumElts; ++i) { 1839 // If the element is after the span we care about, then we're done. 1840 unsigned EltOffset = i*EltSize; 1841 if (EltOffset >= EndBit) break; 1842 1843 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0; 1844 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1845 EndBit-EltOffset, Context)) 1846 return false; 1847 } 1848 // If it overlaps no elements, then it is safe to process as padding. 1849 return true; 1850 } 1851 1852 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1853 const RecordDecl *RD = RT->getDecl(); 1854 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1855 1856 // If this is a C++ record, check the bases first. 1857 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1858 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1859 e = CXXRD->bases_end(); i != e; ++i) { 1860 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1861 "Unexpected base class!"); 1862 const CXXRecordDecl *Base = 1863 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1864 1865 // If the base is after the span we care about, ignore it. 1866 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 1867 if (BaseOffset >= EndBit) continue; 1868 1869 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0; 1870 if (!BitsContainNoUserData(i->getType(), BaseStart, 1871 EndBit-BaseOffset, Context)) 1872 return false; 1873 } 1874 } 1875 1876 // Verify that no field has data that overlaps the region of interest. Yes, 1877 // this could be sped up a lot by being smarter about queried fields; 1878 // however, we're only looking at structs up to 16 bytes, so we don't care 1879 // much. 1880 unsigned idx = 0; 1881 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1882 i != e; ++i, ++idx) { 1883 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1884 1885 // If we found a field after the region we care about, then we're done. 1886 if (FieldOffset >= EndBit) break; 1887 1888 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0; 1889 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1890 Context)) 1891 return false; 1892 } 1893 1894 // If nothing in this record overlapped the area of interest, then we're 1895 // clean. 1896 return true; 1897 } 1898 1899 return false; 1900} 1901 1902/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1903/// float member at the specified offset. For example, {int,{float}} has a 1904/// float at offset 4. It is conservatively correct for this routine to return 1905/// false. 1906static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1907 const llvm::DataLayout &TD) { 1908 // Base case if we find a float. 1909 if (IROffset == 0 && IRType->isFloatTy()) 1910 return true; 1911 1912 // If this is a struct, recurse into the field at the specified offset.
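// For example, given the IR type {i32, {float}} and IROffset == 4, the code below steps into element 1 (at byte offset 4) and recurses on {float} with IROffset == 0, reaching the float base case above.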
1913 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1914 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1915 unsigned Elt = SL->getElementContainingOffset(IROffset); 1916 IROffset -= SL->getElementOffset(Elt); 1917 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1918 } 1919 1920 // If this is an array, recurse into the field at the specified offset. 1921 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1922 llvm::Type *EltTy = ATy->getElementType(); 1923 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1924 IROffset -= IROffset/EltSize*EltSize; 1925 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1926 } 1927 1928 return false; 1929} 1930 1931 1932/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1933/// low 8 bytes of an XMM register, corresponding to the SSE class. 1934llvm::Type *X86_64ABIInfo:: 1935GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1936 QualType SourceTy, unsigned SourceOffset) const { 1937 // The only three choices we have are either double, <2 x float>, or float. We 1938 // pass as float if the last 4 bytes is just padding. This happens for 1939 // structs that contain 3 floats. 1940 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1941 SourceOffset*8+64, getContext())) 1942 return llvm::Type::getFloatTy(getVMContext()); 1943 1944 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1945 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1946 // case. 1947 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 1948 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 1949 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1950 1951 return llvm::Type::getDoubleTy(getVMContext()); 1952} 1953 1954 1955/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1956/// an 8-byte GPR. This means that we either have a scalar or we are talking 1957/// about the high or low part of an up-to-16-byte struct. This routine picks 1958/// the best LLVM IR type to represent this, which may be i64 or may be anything 1959/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1960/// etc). 1961/// 1962/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1963/// the source type. IROffset is an offset in bytes into the LLVM IR type that 1964/// the 8-byte value references. PrefType may be null. 1965/// 1966/// SourceTy is the source level type for the entire argument. SourceOffset is 1967/// an offset into this that we're processing (which is always either 0 or 8). 1968/// 1969llvm::Type *X86_64ABIInfo:: 1970GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1971 QualType SourceTy, unsigned SourceOffset) const { 1972 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1973 // returning an 8-byte unit starting with it. See if we can safely use it. 1974 if (IROffset == 0) { 1975 // Pointers and int64's always fill the 8-byte unit. 1976 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 1977 IRType->isIntegerTy(64)) 1978 return IRType; 1979 1980 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1981 // goodness in the source type is just tail padding. This is allowed to 1982 // kick in for struct {double,int} on the int, but not on 1983 // struct{double,int,int} because we wouldn't return the second int. 
We 1984 // have to do this analysis on the source type because we can't depend on 1985 // unions being lowered a specific way etc. 1986 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1987 IRType->isIntegerTy(32) || 1988 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 1989 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 1990 cast<llvm::IntegerType>(IRType)->getBitWidth(); 1991 1992 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1993 SourceOffset*8+64, getContext())) 1994 return IRType; 1995 } 1996 } 1997 1998 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1999 // If this is a struct, recurse into the field at the specified offset. 2000 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 2001 if (IROffset < SL->getSizeInBytes()) { 2002 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 2003 IROffset -= SL->getElementOffset(FieldIdx); 2004 2005 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 2006 SourceTy, SourceOffset); 2007 } 2008 } 2009 2010 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2011 llvm::Type *EltTy = ATy->getElementType(); 2012 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 2013 unsigned EltOffset = IROffset/EltSize*EltSize; 2014 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 2015 SourceOffset); 2016 } 2017 2018 // Okay, we don't have any better idea of what to pass, so we pass this in an 2019 // integer register that isn't bigger than the remaining bytes of the struct. 2020 unsigned TySizeInBytes = 2021 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 2022 2023 assert(TySizeInBytes != SourceOffset && "Empty field?"); 2024 2025 // It is always safe to classify this as an integer type up to i64 that 2026 // isn't larger than the structure. 2027 return llvm::IntegerType::get(getVMContext(), 2028 std::min(TySizeInBytes-SourceOffset, 8U)*8); 2029} 2030 2031 2032/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 2033/// be used as elements of a two register pair to pass or return, return a 2034/// first class aggregate to represent them. For example, if the low part of 2035/// a by-value argument should be passed as i32* and the high part as float, 2036/// return {i32*, float}. 2037static llvm::Type * 2038GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 2039 const llvm::DataLayout &TD) { 2040 // In order to correctly satisfy the ABI, we need the high part to start 2041 // at offset 8. If the high and low parts we inferred are both 4-byte types 2042 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 2043 // the second element at offset 8. Check for this: 2044 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 2045 unsigned HiAlign = TD.getABITypeAlignment(Hi); 2046 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign); 2047 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 2048 2049 // To handle this, we have to increase the size of the low part so that the 2050 // second element will start at an 8 byte offset. We can't increase the size 2051 // of the second element because it might make us access off the end of the 2052 // struct. 2053 if (HiStart != 8) { 2054 // There are only two sorts of types the ABI generation code can produce for 2055 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32. 2056 // Promote these to a larger type.
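// For example, for a lo/hi pair of i32 and float, {i32, float} would place the float at offset 4; promoting the i32 to i64 yields {i64, float}, which puts the float at the required offset 8.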
2057 if (Lo->isFloatTy()) 2058 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 2059 else { 2060 assert(Lo->isIntegerTy() && "Invalid/unknown lo type"); 2061 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 2062 } 2063 } 2064 2065 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL); 2066 2067 2068 // Verify that the second element is at an 8-byte offset. 2069 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 2070 "Invalid x86-64 argument pair!"); 2071 return Result; 2072} 2073 2074ABIArgInfo X86_64ABIInfo:: 2075classifyReturnType(QualType RetTy) const { 2076 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 2077 // classification algorithm. 2078 X86_64ABIInfo::Class Lo, Hi; 2079 classify(RetTy, 0, Lo, Hi); 2080 2081 // Check some invariants. 2082 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2083 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2084 2085 llvm::Type *ResType = 0; 2086 switch (Lo) { 2087 case NoClass: 2088 if (Hi == NoClass) 2089 return ABIArgInfo::getIgnore(); 2090 // If the low part is just padding, it takes no register, leave ResType 2091 // null. 2092 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2093 "Unknown missing lo part"); 2094 break; 2095 2096 case SSEUp: 2097 case X87Up: 2098 llvm_unreachable("Invalid classification for lo word."); 2099 2100 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 2101 // hidden argument. 2102 case Memory: 2103 return getIndirectReturnResult(RetTy); 2104 2105 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 2106 // available register of the sequence %rax, %rdx is used. 2107 case Integer: 2108 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2109 2110 // If we have a sign or zero extended integer, make sure to return Extend 2111 // so that the parameter gets the right LLVM IR attributes. 2112 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2113 // Treat an enum type as its underlying type. 2114 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2115 RetTy = EnumTy->getDecl()->getIntegerType(); 2116 2117 if (RetTy->isIntegralOrEnumerationType() && 2118 RetTy->isPromotableIntegerType()) 2119 return ABIArgInfo::getExtend(); 2120 } 2121 break; 2122 2123 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 2124 // available SSE register of the sequence %xmm0, %xmm1 is used. 2125 case SSE: 2126 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2127 break; 2128 2129 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 2130 // returned on the X87 stack in %st0 as 80-bit x87 number. 2131 case X87: 2132 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 2133 break; 2134 2135 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 2136 // part of the value is returned in %st0 and the imaginary part in 2137 // %st1. 2138 case ComplexX87: 2139 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 2140 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 2141 llvm::Type::getX86_FP80Ty(getVMContext()), 2142 NULL); 2143 break; 2144 } 2145 2146 llvm::Type *HighPart = 0; 2147 switch (Hi) { 2148 // Memory was handled previously and X87 should 2149 // never occur as a hi class. 2150 case Memory: 2151 case X87: 2152 llvm_unreachable("Invalid classification for hi word."); 2153 2154 case ComplexX87: // Previously handled. 
2155 case NoClass: 2156 break; 2157 2158 case Integer: 2159 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2160 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2161 return ABIArgInfo::getDirect(HighPart, 8); 2162 break; 2163 case SSE: 2164 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2165 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2166 return ABIArgInfo::getDirect(HighPart, 8); 2167 break; 2168 2169 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 2170 // is passed in the next available eightbyte chunk of the last used 2171 // vector register. 2172 // 2173 // SSEUP should always be preceded by SSE, just widen. 2174 case SSEUp: 2175 assert(Lo == SSE && "Unexpected SSEUp classification."); 2176 ResType = GetByteVectorType(RetTy); 2177 break; 2178 2179 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2180 // returned together with the previous X87 value in %st0. 2181 case X87Up: 2182 // If X87Up is preceded by X87, we don't need to do 2183 // anything. However, in some cases with unions it may not be 2184 // preceded by X87. In such situations we follow gcc and pass the 2185 // extra bits in an SSE reg. 2186 if (Lo != X87) { 2187 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2188 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2189 return ABIArgInfo::getDirect(HighPart, 8); 2190 } 2191 break; 2192 } 2193 2194 // If a high part was specified, merge it together with the low part. It is 2195 // known to pass in the high eightbyte of the result. We do this by forming a 2196 // first class struct aggregate with the high and low part: {low, high} 2197 if (HighPart) 2198 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2199 2200 return ABIArgInfo::getDirect(ResType); 2201} 2202 2203ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2204 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE) 2205 const 2206{ 2207 X86_64ABIInfo::Class Lo, Hi; 2208 classify(Ty, 0, Lo, Hi); 2209 2210 // Check some invariants. 2211 // FIXME: Enforce these by construction. 2212 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2213 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2214 2215 neededInt = 0; 2216 neededSSE = 0; 2217 llvm::Type *ResType = 0; 2218 switch (Lo) { 2219 case NoClass: 2220 if (Hi == NoClass) 2221 return ABIArgInfo::getIgnore(); 2222 // If the low part is just padding, it takes no register, leave ResType 2223 // null. 2224 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2225 "Unknown missing lo part"); 2226 break; 2227 2228 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2229 // on the stack. 2230 case Memory: 2231 2232 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2233 // COMPLEX_X87, it is passed in memory. 2234 case X87: 2235 case ComplexX87: 2236 if (getRecordArgABI(Ty, CGT) == CGCXXABI::RAA_Indirect) 2237 ++neededInt; 2238 return getIndirectResult(Ty, freeIntRegs); 2239 2240 case SSEUp: 2241 case X87Up: 2242 llvm_unreachable("Invalid classification for lo word."); 2243 2244 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2245 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2246 // and %r9 is used. 2247 case Integer: 2248 ++neededInt; 2249 2250 // Pick an 8-byte type based on the preferred type.
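// For example, for 'char *' this returns the pointer type itself (i8*), while for 'struct { int a, b; }' the recursion bottoms out and returns i64, covering both ints with a single GPR.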
2251 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2252 2253 // If we have a sign or zero extended integer, make sure to return Extend 2254 // so that the parameter gets the right LLVM IR attributes. 2255 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2256 // Treat an enum type as its underlying type. 2257 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2258 Ty = EnumTy->getDecl()->getIntegerType(); 2259 2260 if (Ty->isIntegralOrEnumerationType() && 2261 Ty->isPromotableIntegerType()) 2262 return ABIArgInfo::getExtend(); 2263 } 2264 2265 break; 2266 2267 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2268 // available SSE register is used, the registers are taken in the 2269 // order from %xmm0 to %xmm7. 2270 case SSE: { 2271 llvm::Type *IRType = CGT.ConvertType(Ty); 2272 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2273 ++neededSSE; 2274 break; 2275 } 2276 } 2277 2278 llvm::Type *HighPart = 0; 2279 switch (Hi) { 2280 // Memory was handled previously, ComplexX87 and X87 should 2281 // never occur as hi classes, and X87Up must be preceded by X87, 2282 // which is passed in memory. 2283 case Memory: 2284 case X87: 2285 case ComplexX87: 2286 llvm_unreachable("Invalid classification for hi word."); 2287 2288 case NoClass: break; 2289 2290 case Integer: 2291 ++neededInt; 2292 // Pick an 8-byte type based on the preferred type. 2293 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2294 2295 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2296 return ABIArgInfo::getDirect(HighPart, 8); 2297 break; 2298 2299 // X87Up generally doesn't occur here (long double is passed in 2300 // memory), except in situations involving unions. 2301 case X87Up: 2302 case SSE: 2303 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2304 2305 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2306 return ABIArgInfo::getDirect(HighPart, 8); 2307 2308 ++neededSSE; 2309 break; 2310 2311 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2312 // eightbyte is passed in the upper half of the last used SSE 2313 // register. This only happens when 128-bit vectors are passed. 2314 case SSEUp: 2315 assert(Lo == SSE && "Unexpected SSEUp classification"); 2316 ResType = GetByteVectorType(Ty); 2317 break; 2318 } 2319 2320 // If a high part was specified, merge it together with the low part. It is 2321 // known to pass in the high eightbyte of the result. We do this by forming a 2322 // first class struct aggregate with the high and low part: {low, high} 2323 if (HighPart) 2324 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2325 2326 return ABIArgInfo::getDirect(ResType); 2327} 2328 2329void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2330 2331 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2332 2333 // Keep track of the number of assigned registers. 2334 unsigned freeIntRegs = 6, freeSSERegs = 8; 2335 2336 // If the return value is indirect, then the hidden argument is consuming one 2337 // integer register. 2338 if (FI.getReturnInfo().isIndirect()) 2339 --freeIntRegs; 2340 2341 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2342 // get assigned (in left-to-right order) for passing as follows... 
2343 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2344 it != ie; ++it) { 2345 unsigned neededInt, neededSSE; 2346 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2347 neededSSE); 2348 2349 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2350 // eightbyte of an argument, the whole argument is passed on the 2351 // stack. If registers have already been assigned for some 2352 // eightbytes of such an argument, the assignments get reverted. 2353 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2354 freeIntRegs -= neededInt; 2355 freeSSERegs -= neededSSE; 2356 } else { 2357 it->info = getIndirectResult(it->type, freeIntRegs); 2358 } 2359 } 2360} 2361 2362static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2363 QualType Ty, 2364 CodeGenFunction &CGF) { 2365 llvm::Value *overflow_arg_area_p = 2366 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2367 llvm::Value *overflow_arg_area = 2368 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2369 2370 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2371 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2372 // It isn't stated explicitly in the standard, but in practice we use 2373 // alignment greater than 16 where necessary. 2374 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2375 if (Align > 8) { 2376 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2377 llvm::Value *Offset = 2378 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2379 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2380 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2381 CGF.Int64Ty); 2382 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2383 overflow_arg_area = 2384 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2385 overflow_arg_area->getType(), 2386 "overflow_arg_area.align"); 2387 } 2388 2389 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2390 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2391 llvm::Value *Res = 2392 CGF.Builder.CreateBitCast(overflow_arg_area, 2393 llvm::PointerType::getUnqual(LTy)); 2394 2395 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2396 // l->overflow_arg_area + sizeof(type). 2397 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2398 // an 8 byte boundary. 2399 2400 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2401 llvm::Value *Offset = 2402 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2403 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2404 "overflow_arg_area.next"); 2405 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2406 2407 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2408 return Res; 2409} 2410 2411llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2412 CodeGenFunction &CGF) const { 2413 // Assume that va_list type is correct; should be pointer to LLVM type: 2414 // struct { 2415 // i32 gp_offset; 2416 // i32 fp_offset; 2417 // i8* overflow_arg_area; 2418 // i8* reg_save_area; 2419 // }; 2420 unsigned neededInt, neededSSE; 2421 2422 Ty = CGF.getContext().getCanonicalType(Ty); 2423 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE); 2424 2425 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2426 // in the registers. If not go to step 7. 
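// For example, a 32-byte struct such as 'struct { long a, b, c, d; }' classifies as MEMORY, leaving neededInt == neededSSE == 0, so it is fetched directly from the overflow area, whereas a plain 'int' sets neededInt to 1 and is considered for the register save area.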
2427 if (!neededInt && !neededSSE) 2428 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2429 2430 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2431 // general purpose registers needed to pass type and num_fp to hold 2432 // the number of floating point registers needed. 2433 2434 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2435 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2436 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2437 // 2438 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of 2439 // register save space. 2440 2441 llvm::Value *InRegs = 0; 2442 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2443 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2444 if (neededInt) { 2445 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2446 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2447 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2448 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2449 } 2450 2451 if (neededSSE) { 2452 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2453 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2454 llvm::Value *FitsInFP = 2455 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2456 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2457 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2458 } 2459 2460 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2461 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2462 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2463 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2464 2465 // Emit code to load the value if it was passed in registers. 2466 2467 CGF.EmitBlock(InRegBlock); 2468 2469 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2470 // an offset of l->gp_offset and/or l->fp_offset. This may require 2471 // copying to a temporary location in case the parameter is passed 2472 // in different register classes or requires an alignment greater 2473 // than 8 for general purpose registers and 16 for XMM registers. 2474 // 2475 // FIXME: This really results in shameful code when we end up needing to 2476 // collect arguments from different places; often what should result in a 2477 // simple assembling of a structure from scattered addresses has many more 2478 // loads than necessary. Can we clean this up? 2479 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2480 llvm::Value *RegAddr = 2481 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2482 "reg_save_area"); 2483 if (neededInt && neededSSE) { 2484 // FIXME: Cleanup.
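// For example, this path handles 'struct { double d; int i; }', where one eightbyte arrives in an XMM register and the other in a GPR; both halves are loaded and stored into a single temporary so that one address can be returned.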
2485 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2486 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2487 llvm::Value *Tmp = CGF.CreateMemTemp(Ty); 2488 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); 2489 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2490 llvm::Type *TyLo = ST->getElementType(0); 2491 llvm::Type *TyHi = ST->getElementType(1); 2492 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2493 "Unexpected ABI info for mixed regs"); 2494 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2495 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2496 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2497 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2498 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2499 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2500 llvm::Value *V = 2501 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2502 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2503 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2504 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2505 2506 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2507 llvm::PointerType::getUnqual(LTy)); 2508 } else if (neededInt) { 2509 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2510 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2511 llvm::PointerType::getUnqual(LTy)); 2512 2513 // Copy to a temporary if necessary to ensure the appropriate alignment. 2514 std::pair<CharUnits, CharUnits> SizeAlign = 2515 CGF.getContext().getTypeInfoInChars(Ty); 2516 uint64_t TySize = SizeAlign.first.getQuantity(); 2517 unsigned TyAlign = SizeAlign.second.getQuantity(); 2518 if (TyAlign > 8) { 2520 llvm::Value *Tmp = CGF.CreateMemTemp(Ty); 2521 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false); 2522 RegAddr = Tmp; 2523 } 2524 } else if (neededSSE == 1) { 2525 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2526 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2527 llvm::PointerType::getUnqual(LTy)); 2528 } else { 2529 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2530 // SSE registers are spaced 16 bytes apart in the register save 2531 // area, so we need to collect the two eightbytes together. 2532 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2533 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2534 llvm::Type *DoubleTy = CGF.DoubleTy; 2535 llvm::Type *DblPtrTy = 2536 llvm::PointerType::getUnqual(DoubleTy); 2537 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL); 2538 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty); 2539 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); 2540 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2541 DblPtrTy)); 2542 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2543 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2544 DblPtrTy)); 2545 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2546 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2547 llvm::PointerType::getUnqual(LTy)); 2548 } 2549 2550 // AMD64-ABI 3.5.7p5: Step 5. Set: 2551 // l->gp_offset = l->gp_offset + num_gp * 8 2552 // l->fp_offset = l->fp_offset + num_fp * 16.
2553 if (neededInt) { 2554 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2555 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2556 gp_offset_p); 2557 } 2558 if (neededSSE) { 2559 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2560 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2561 fp_offset_p); 2562 } 2563 CGF.EmitBranch(ContBlock); 2564 2565 // Emit code to load the value if it was passed in memory. 2566 2567 CGF.EmitBlock(InMemBlock); 2568 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2569 2570 // Return the appropriate result. 2571 2572 CGF.EmitBlock(ContBlock); 2573 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2574 "vaarg.addr"); 2575 ResAddr->addIncoming(RegAddr, InRegBlock); 2576 ResAddr->addIncoming(MemAddr, InMemBlock); 2577 return ResAddr; 2578} 2579 2580ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const { 2581 2582 if (Ty->isVoidType()) 2583 return ABIArgInfo::getIgnore(); 2584 2585 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2586 Ty = EnumTy->getDecl()->getIntegerType(); 2587 2588 uint64_t Size = getContext().getTypeSize(Ty); 2589 2590 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2591 if (IsReturnType) { 2592 if (isRecordReturnIndirect(RT, CGT)) 2593 return ABIArgInfo::getIndirect(0, false); 2594 } else { 2595 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT)) 2596 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 2597 } 2598 2599 if (RT->getDecl()->hasFlexibleArrayMember()) 2600 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2601 2602 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2603 if (Size == 128 && getTarget().getTriple().getOS() == llvm::Triple::MinGW32) 2604 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2605 Size)); 2606 2607 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2608 // not 1, 2, 4, or 8 bytes, must be passed by reference." 
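// For example, a 4-byte struct passes the power-of-two size test below and is passed directly as i32, while a 3-byte struct (not a power of two) or a 16-byte struct (larger than 8 bytes) is passed by reference.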
2609 if (Size <= 64 && 2610 (Size & (Size - 1)) == 0) 2611 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2612 Size)); 2613 2614 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2615 } 2616 2617 if (Ty->isPromotableIntegerType()) 2618 return ABIArgInfo::getExtend(); 2619 2620 return ABIArgInfo::getDirect(); 2621} 2622 2623void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2624 2625 QualType RetTy = FI.getReturnType(); 2626 FI.getReturnInfo() = classify(RetTy, true); 2627 2628 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2629 it != ie; ++it) 2630 it->info = classify(it->type, false); 2631} 2632 2633llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2634 CodeGenFunction &CGF) const { 2635 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2636 2637 CGBuilderTy &Builder = CGF.Builder; 2638 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2639 "ap"); 2640 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2641 llvm::Type *PTy = 2642 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2643 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2644 2645 uint64_t Offset = 2646 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2647 llvm::Value *NextAddr = 2648 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2649 "ap.next"); 2650 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2651 2652 return AddrTyped; 2653} 2654 2655namespace { 2656 2657class NaClX86_64ABIInfo : public ABIInfo { 2658 public: 2659 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 2660 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {} 2661 virtual void computeInfo(CGFunctionInfo &FI) const; 2662 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2663 CodeGenFunction &CGF) const; 2664 private: 2665 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 2666 X86_64ABIInfo NInfo; // Used for everything else. 2667}; 2668 2669class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2670 public: 2671 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 2672 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {} 2673}; 2674 2675} 2676 2677void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2678 if (FI.getASTCallingConvention() == CC_PnaclCall) 2679 PInfo.computeInfo(FI); 2680 else 2681 NInfo.computeInfo(FI); 2682} 2683 2684llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2685 CodeGenFunction &CGF) const { 2686 // Always use the native convention; calling pnacl-style varargs functions 2687 // is unsupported. 2688 return NInfo.EmitVAArg(VAListAddr, Ty, CGF); 2689} 2690 2691 2692// PowerPC-32 2693 2694namespace { 2695class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2696public: 2697 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2698 2699 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2700 // This is recovered from gcc output. 2701 return 1; // r1 is the dedicated stack pointer 2702 } 2703 2704 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2705 llvm::Value *Address) const; 2706}; 2707 2708} 2709 2710bool 2711PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2712 llvm::Value *Address) const { 2713 // This is calculated from the LLVM and GCC tables and verified 2714 // against gcc output. AFAIK all ABIs use the same encoding.
2715 2716 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2717 2718 llvm::IntegerType *i8 = CGF.Int8Ty; 2719 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2720 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2721 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2722 2723 // 0-31: r0-31, the 4-byte general-purpose registers 2724 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2725 2726 // 32-63: fp0-31, the 8-byte floating-point registers 2727 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2728 2729 // 64-76 are various 4-byte special-purpose registers: 2730 // 64: mq 2731 // 65: lr 2732 // 66: ctr 2733 // 67: ap 2734 // 68-75 cr0-7 2735 // 76: xer 2736 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2737 2738 // 77-108: v0-31, the 16-byte vector registers 2739 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2740 2741 // 109: vrsave 2742 // 110: vscr 2743 // 111: spe_acc 2744 // 112: spefscr 2745 // 113: sfp 2746 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2747 2748 return false; 2749} 2750 2751// PowerPC-64 2752 2753namespace { 2754/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 2755class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 2756 2757public: 2758 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 2759 2760 bool isPromotableTypeForABI(QualType Ty) const; 2761 2762 ABIArgInfo classifyReturnType(QualType RetTy) const; 2763 ABIArgInfo classifyArgumentType(QualType Ty) const; 2764 2765 // TODO: We can add more logic to computeInfo to improve performance. 2766 // Example: For aggregate arguments that fit in a register, we could 2767 // use getDirectInReg (as is done below for structs containing a single 2768 // floating-point value) to avoid pushing them to memory on function 2769 // entry. This would require changing the logic in PPCISelLowering 2770 // when lowering the parameters in the caller and args in the callee. 2771 virtual void computeInfo(CGFunctionInfo &FI) const { 2772 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2773 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2774 it != ie; ++it) { 2775 // We rely on the default argument classification for the most part. 2776 // One exception: An aggregate containing a single floating-point 2777 // item must be passed in a register if one is available. 2778 const Type *T = isSingleElementStruct(it->type, getContext()); 2779 if (T) { 2780 const BuiltinType *BT = T->getAs<BuiltinType>(); 2781 if (BT && BT->isFloatingPoint()) { 2782 QualType QT(T, 0); 2783 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 2784 continue; 2785 } 2786 } 2787 it->info = classifyArgumentType(it->type); 2788 } 2789 } 2790 2791 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 2792 QualType Ty, 2793 CodeGenFunction &CGF) const; 2794}; 2795 2796class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 2797public: 2798 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT) 2799 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {} 2800 2801 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2802 // This is recovered from gcc output. 
2803 return 1; // r1 is the dedicated stack pointer 2804 } 2805 2806 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2807 llvm::Value *Address) const; 2808}; 2809 2810class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2811public: 2812 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2813 2814 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2815 // This is recovered from gcc output. 2816 return 1; // r1 is the dedicated stack pointer 2817 } 2818 2819 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2820 llvm::Value *Address) const; 2821}; 2822 2823} 2824 2825// Return true if the ABI requires Ty to be passed sign- or zero- 2826// extended to 64 bits. 2827bool 2828PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 2829 // Treat an enum type as its underlying type. 2830 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2831 Ty = EnumTy->getDecl()->getIntegerType(); 2832 2833 // Promotable integer types are required to be promoted by the ABI. 2834 if (Ty->isPromotableIntegerType()) 2835 return true; 2836 2837 // In addition to the usual promotable integer types, we also need to 2838 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 2839 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 2840 switch (BT->getKind()) { 2841 case BuiltinType::Int: 2842 case BuiltinType::UInt: 2843 return true; 2844 default: 2845 break; 2846 } 2847 2848 return false; 2849} 2850 2851ABIArgInfo 2852PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 2853 if (Ty->isAnyComplexType()) 2854 return ABIArgInfo::getDirect(); 2855 2856 if (isAggregateTypeForABI(Ty)) { 2857 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) 2858 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 2859 2860 return ABIArgInfo::getIndirect(0); 2861 } 2862 2863 return (isPromotableTypeForABI(Ty) ? 2864 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2865} 2866 2867ABIArgInfo 2868PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 2869 if (RetTy->isVoidType()) 2870 return ABIArgInfo::getIgnore(); 2871 2872 if (RetTy->isAnyComplexType()) 2873 return ABIArgInfo::getDirect(); 2874 2875 if (isAggregateTypeForABI(RetTy)) 2876 return ABIArgInfo::getIndirect(0); 2877 2878 return (isPromotableTypeForABI(RetTy) ? 2879 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2880} 2881 2882// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 2883llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 2884 QualType Ty, 2885 CodeGenFunction &CGF) const { 2886 llvm::Type *BP = CGF.Int8PtrTy; 2887 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2888 2889 CGBuilderTy &Builder = CGF.Builder; 2890 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2891 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2892 2893 // Update the va_list pointer. The pointer should be bumped by the 2894 // size of the object. We can trust getTypeSize() except for a complex 2895 // type whose base type is smaller than a doubleword. For these, the 2896 // size of the object is 16 bytes; see below for further explanation. 
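// For example, for '_Complex float' each 4-byte part is right-adjusted in its own doubleword, so the va_list pointer must advance by 16 even though sizeof(_Complex float) is 8.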
2897 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 2898 QualType BaseTy; 2899 unsigned CplxBaseSize = 0; 2900 2901 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 2902 BaseTy = CTy->getElementType(); 2903 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8; 2904 if (CplxBaseSize < 8) 2905 SizeInBytes = 16; 2906 } 2907 2908 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 2909 llvm::Value *NextAddr = 2910 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 2911 "ap.next"); 2912 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2913 2914 // If we have a complex type and the base type is smaller than 8 bytes, 2915 // the ABI calls for the real and imaginary parts to be right-adjusted 2916 // in separate doublewords. However, Clang expects us to produce a 2917 // pointer to a structure with the two parts packed tightly. So generate 2918 // loads of the real and imaginary parts relative to the va_list pointer, 2919 // and store them to a temporary structure. 2920 if (CplxBaseSize && CplxBaseSize < 8) { 2921 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2922 llvm::Value *ImagAddr = RealAddr; 2923 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize)); 2924 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize)); 2925 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy)); 2926 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy); 2927 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy); 2928 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal"); 2929 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag"); 2930 llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), 2931 "vacplx"); 2932 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real"); 2933 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag"); 2934 Builder.CreateStore(Real, RealPtr, false); 2935 Builder.CreateStore(Imag, ImagPtr, false); 2936 return Ptr; 2937 } 2938 2939 // If the argument is smaller than 8 bytes, it is right-adjusted in 2940 // its doubleword slot. Adjust the pointer to pick it up from the 2941 // correct offset. 2942 if (SizeInBytes < 8) { 2943 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2944 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 2945 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2946 } 2947 2948 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2949 return Builder.CreateBitCast(Addr, PTy); 2950} 2951 2952static bool 2953PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2954 llvm::Value *Address) { 2955 // This is calculated from the LLVM and GCC tables and verified 2956 // against gcc output. AFAIK all ABIs use the same encoding. 
2957 2958 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2959 2960 llvm::IntegerType *i8 = CGF.Int8Ty; 2961 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2962 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2963 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2964 2965 // 0-31: r0-31, the 8-byte general-purpose registers 2966 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2967 2968 // 32-63: fp0-31, the 8-byte floating-point registers 2969 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2970 2971 // 64-76 are various 4-byte special-purpose registers: 2972 // 64: mq 2973 // 65: lr 2974 // 66: ctr 2975 // 67: ap 2976 // 68-75 cr0-7 2977 // 76: xer 2978 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2979 2980 // 77-108: v0-31, the 16-byte vector registers 2981 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2982 2983 // 109: vrsave 2984 // 110: vscr 2985 // 111: spe_acc 2986 // 112: spefscr 2987 // 113: sfp 2988 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2989 2990 return false; 2991} 2992 2993bool 2994PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 2995 CodeGen::CodeGenFunction &CGF, 2996 llvm::Value *Address) const { 2997 2998 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2999} 3000 3001bool 3002PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3003 llvm::Value *Address) const { 3004 3005 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 3006} 3007 3008//===----------------------------------------------------------------------===// 3009// ARM ABI Implementation 3010//===----------------------------------------------------------------------===// 3011 3012namespace { 3013 3014class ARMABIInfo : public ABIInfo { 3015public: 3016 enum ABIKind { 3017 APCS = 0, 3018 AAPCS = 1, 3019 AAPCS_VFP 3020 }; 3021 3022private: 3023 ABIKind Kind; 3024 3025public: 3026 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) { 3027 setRuntimeCC(); 3028 } 3029 3030 bool isEABI() const { 3031 StringRef Env = getTarget().getTriple().getEnvironmentName(); 3032 return (Env == "gnueabi" || Env == "eabi" || 3033 Env == "android" || Env == "androideabi"); 3034 } 3035 3036private: 3037 ABIKind getABIKind() const { return Kind; } 3038 3039 ABIArgInfo classifyReturnType(QualType RetTy) const; 3040 ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs, 3041 unsigned &AllocatedVFP, 3042 bool &IsHA) const; 3043 bool isIllegalVectorType(QualType Ty) const; 3044 3045 virtual void computeInfo(CGFunctionInfo &FI) const; 3046 3047 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3048 CodeGenFunction &CGF) const; 3049 3050 llvm::CallingConv::ID getLLVMDefaultCC() const; 3051 llvm::CallingConv::ID getABIDefaultCC() const; 3052 void setRuntimeCC(); 3053}; 3054 3055class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 3056public: 3057 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 3058 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 3059 3060 const ARMABIInfo &getABIInfo() const { 3061 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 3062 } 3063 3064 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 3065 return 13; 3066 } 3067 3068 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 3069 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 3070 } 3071 3072 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3073 llvm::Value *Address) const { 3074 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 
3075 3076 // 0-15 are the 16 integer registers. 3077 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 3078 return false; 3079 } 3080 3081 unsigned getSizeOfUnwindException() const { 3082 if (getABIInfo().isEABI()) return 88; 3083 return TargetCodeGenInfo::getSizeOfUnwindException(); 3084 } 3085}; 3086 3087} 3088 3089void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3090 // To correctly handle Homogeneous Aggregate, we need to keep track of the 3091 // VFP registers allocated so far. 3092 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 3093 // VFP registers of the appropriate type unallocated then the argument is 3094 // allocated to the lowest-numbered sequence of such registers. 3095 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 3096 // unallocated are marked as unavailable. 3097 unsigned AllocatedVFP = 0; 3098 int VFPRegs[16] = { 0 }; 3099 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3100 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3101 it != ie; ++it) { 3102 unsigned PreAllocation = AllocatedVFP; 3103 bool IsHA = false; 3104 // 6.1.2.3 There is one VFP co-processor register class using registers 3105 // s0-s15 (d0-d7) for passing arguments. 3106 const unsigned NumVFPs = 16; 3107 it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA); 3108 // If we do not have enough VFP registers for the HA, any VFP registers 3109 // that are unallocated are marked as unavailable. To achieve this, we add 3110 // padding of (NumVFPs - PreAllocation) floats. 3111 if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) { 3112 llvm::Type *PaddingTy = llvm::ArrayType::get( 3113 llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation); 3114 it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy); 3115 } 3116 } 3117 3118 // Always honor user-specified calling convention. 3119 if (FI.getCallingConvention() != llvm::CallingConv::C) 3120 return; 3121 3122 llvm::CallingConv::ID cc = getRuntimeCC(); 3123 if (cc != llvm::CallingConv::C) 3124 FI.setEffectiveCallingConvention(cc); 3125} 3126 3127/// Return the default calling convention that LLVM will use. 3128llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 3129 // The default calling convention that LLVM will infer. 3130 if (getTarget().getTriple().getEnvironmentName()=="gnueabihf") 3131 return llvm::CallingConv::ARM_AAPCS_VFP; 3132 else if (isEABI()) 3133 return llvm::CallingConv::ARM_AAPCS; 3134 else 3135 return llvm::CallingConv::ARM_APCS; 3136} 3137 3138/// Return the calling convention that our ABI would like us to use 3139/// as the C calling convention. 3140llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 3141 switch (getABIKind()) { 3142 case APCS: return llvm::CallingConv::ARM_APCS; 3143 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 3144 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 3145 } 3146 llvm_unreachable("bad ABI kind"); 3147} 3148 3149void ARMABIInfo::setRuntimeCC() { 3150 assert(getRuntimeCC() == llvm::CallingConv::C); 3151 3152 // Don't muddy up the IR with a ton of explicit annotations if 3153 // they'd just match what LLVM will infer from the triple. 3154 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 3155 if (abiCC != getLLVMDefaultCC()) 3156 RuntimeCC = abiCC; 3157} 3158 3159/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 3160/// aggregate. 
If HAMembers is non-null, the number of base elements 3161/// contained in the type is returned through it; this is used for the 3162/// recursive calls that check aggregate component types. 3163static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 3164 ASTContext &Context, 3165 uint64_t *HAMembers = 0) { 3166 uint64_t Members = 0; 3167 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 3168 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 3169 return false; 3170 Members *= AT->getSize().getZExtValue(); 3171 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 3172 const RecordDecl *RD = RT->getDecl(); 3173 if (RD->hasFlexibleArrayMember()) 3174 return false; 3175 3176 Members = 0; 3177 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3178 i != e; ++i) { 3179 const FieldDecl *FD = *i; 3180 uint64_t FldMembers; 3181 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 3182 return false; 3183 3184 Members = (RD->isUnion() ? 3185 std::max(Members, FldMembers) : Members + FldMembers); 3186 } 3187 } else { 3188 Members = 1; 3189 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 3190 Members = 2; 3191 Ty = CT->getElementType(); 3192 } 3193 3194 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 3195 // double, or 64-bit or 128-bit vectors. 3196 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3197 if (BT->getKind() != BuiltinType::Float && 3198 BT->getKind() != BuiltinType::Double && 3199 BT->getKind() != BuiltinType::LongDouble) 3200 return false; 3201 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 3202 unsigned VecSize = Context.getTypeSize(VT); 3203 if (VecSize != 64 && VecSize != 128) 3204 return false; 3205 } else { 3206 return false; 3207 } 3208 3209 // The base type must be the same for all members. Vector types of the 3210 // same total size are treated as being equivalent here. 3211 const Type *TyPtr = Ty.getTypePtr(); 3212 if (!Base) 3213 Base = TyPtr; 3214 if (Base != TyPtr && 3215 (!Base->isVectorType() || !TyPtr->isVectorType() || 3216 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 3217 return false; 3218 } 3219 3220 // Homogeneous Aggregates can have at most 4 members of the base type. 3221 if (HAMembers) 3222 *HAMembers = Members; 3223 3224 return (Members > 0 && Members <= 4); 3225} 3226 3227/// markAllocatedVFPs - update VFPRegs according to the alignment and 3228/// number of VFP registers (unit is S register) requested. 3229static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP, 3230 unsigned Alignment, 3231 unsigned NumRequired) { 3232 // Early Exit. 3233 if (AllocatedVFP >= 16) 3234 return; 3235 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 3236 // VFP registers of the appropriate type unallocated then the argument is 3237 // allocated to the lowest-numbered sequence of such registers. 3238 for (unsigned I = 0; I < 16; I += Alignment) { 3239 bool FoundSlot = true; 3240 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 3241 if (J >= 16 || VFPRegs[J]) { 3242 FoundSlot = false; 3243 break; 3244 } 3245 if (FoundSlot) { 3246 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 3247 VFPRegs[J] = 1; 3248 AllocatedVFP += NumRequired; 3249 return; 3250 } 3251 } 3252 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 3253 // unallocated are marked as unavailable. 
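// Illustrative sketch (hypothetical argument list): if s0 is already taken
// and a homogeneous aggregate of two doubles arrives, C.1.vfp places it at
// the next free even S-register boundary, s2-s5 (d1-d2). If too few
// consecutive S registers remain for a VFP candidate, the loop below falls
// through and all 16 S registers are marked as used, so no later VFP
// argument can be back-filled into the gaps.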
3254 for (unsigned I = 0; I < 16; I++) 3255 VFPRegs[I] = 1; 3256 AllocatedVFP = 17; // We do not have enough VFP registers. 3257} 3258 3259ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs, 3260 unsigned &AllocatedVFP, 3261 bool &IsHA) const { 3262 // We update number of allocated VFPs according to 3263 // 6.1.2.1 The following argument types are VFP CPRCs: 3264 // A single-precision floating-point type (including promoted 3265 // half-precision types); A double-precision floating-point type; 3266 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 3267 // with a Base Type of a single- or double-precision floating-point type, 3268 // 64-bit containerized vectors or 128-bit containerized vectors with one 3269 // to four Elements. 3270 3271 // Handle illegal vector types here. 3272 if (isIllegalVectorType(Ty)) { 3273 uint64_t Size = getContext().getTypeSize(Ty); 3274 if (Size <= 32) { 3275 llvm::Type *ResType = 3276 llvm::Type::getInt32Ty(getVMContext()); 3277 return ABIArgInfo::getDirect(ResType); 3278 } 3279 if (Size == 64) { 3280 llvm::Type *ResType = llvm::VectorType::get( 3281 llvm::Type::getInt32Ty(getVMContext()), 2); 3282 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2); 3283 return ABIArgInfo::getDirect(ResType); 3284 } 3285 if (Size == 128) { 3286 llvm::Type *ResType = llvm::VectorType::get( 3287 llvm::Type::getInt32Ty(getVMContext()), 4); 3288 markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4); 3289 return ABIArgInfo::getDirect(ResType); 3290 } 3291 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3292 } 3293 // Update VFPRegs for legal vector types. 3294 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3295 uint64_t Size = getContext().getTypeSize(VT); 3296 // Size of a legal vector should be power of 2 and above 64. 3297 markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32); 3298 } 3299 // Update VFPRegs for floating point types. 3300 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3301 if (BT->getKind() == BuiltinType::Half || 3302 BT->getKind() == BuiltinType::Float) 3303 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1); 3304 if (BT->getKind() == BuiltinType::Double || 3305 BT->getKind() == BuiltinType::LongDouble) 3306 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2); 3307 } 3308 3309 if (!isAggregateTypeForABI(Ty)) { 3310 // Treat an enum type as its underlying type. 3311 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3312 Ty = EnumTy->getDecl()->getIntegerType(); 3313 3314 return (Ty->isPromotableIntegerType() ? 3315 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3316 } 3317 3318 // Ignore empty records. 3319 if (isEmptyRecord(getContext(), Ty, true)) 3320 return ABIArgInfo::getIgnore(); 3321 3322 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) 3323 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 3324 3325 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 3326 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 3327 // into VFP registers. 3328 const Type *Base = 0; 3329 uint64_t Members = 0; 3330 if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) { 3331 assert(Base && "Base class should be set for homogeneous aggregate"); 3332 // Base can be a floating-point or a vector. 3333 if (Base->isVectorType()) { 3334 // ElementSize is in number of floats. 3335 unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 
2 : 4; 3336 markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize, 3337 Members * ElementSize); 3338 } else if (Base->isSpecificBuiltinType(BuiltinType::Float)) 3339 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members); 3340 else { 3341 assert(Base->isSpecificBuiltinType(BuiltinType::Double) || 3342 Base->isSpecificBuiltinType(BuiltinType::LongDouble)); 3343 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2); 3344 } 3345 IsHA = true; 3346 return ABIArgInfo::getExpand(); 3347 } 3348 } 3349 3350 // Support byval for ARM. 3351 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 3352 // most 8-byte. We realign the indirect argument if type alignment is bigger 3353 // than ABI alignment. 3354 uint64_t ABIAlign = 4; 3355 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 3356 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3357 getABIKind() == ARMABIInfo::AAPCS) 3358 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3359 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 3360 return ABIArgInfo::getIndirect(0, /*ByVal=*/true, 3361 /*Realign=*/TyAlign > ABIAlign); 3362 } 3363 3364 // Otherwise, pass by coercing to a structure of the appropriate size. 3365 llvm::Type* ElemTy; 3366 unsigned SizeRegs; 3367 // FIXME: Try to match the types of the arguments more accurately where 3368 // we can. 3369 if (getContext().getTypeAlign(Ty) <= 32) { 3370 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 3371 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 3372 } else { 3373 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 3374 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 3375 } 3376 3377 llvm::Type *STy = 3378 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 3379 return ABIArgInfo::getDirect(STy); 3380} 3381 3382static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 3383 llvm::LLVMContext &VMContext) { 3384 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 3385 // is called integer-like if its size is less than or equal to one word, and 3386 // the offset of each of its addressable sub-fields is zero. 3387 3388 uint64_t Size = Context.getTypeSize(Ty); 3389 3390 // Check that the type fits in a word. 3391 if (Size > 32) 3392 return false; 3393 3394 // FIXME: Handle vector types! 3395 if (Ty->isVectorType()) 3396 return false; 3397 3398 // Float types are never treated as "integer like". 3399 if (Ty->isRealFloatingType()) 3400 return false; 3401 3402 // If this is a builtin or pointer type then it is ok. 3403 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 3404 return true; 3405 3406 // Small complex integer types are "integer like". 3407 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 3408 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 3409 3410 // Single element and zero sized arrays should be allowed, by the definition 3411 // above, but they are not. 3412 3413 // Otherwise, it must be a record type. 3414 const RecordType *RT = Ty->getAs<RecordType>(); 3415 if (!RT) return false; 3416 3417 // Ignore records with flexible arrays. 3418 const RecordDecl *RD = RT->getDecl(); 3419 if (RD->hasFlexibleArrayMember()) 3420 return false; 3421 3422 // Check that all sub-fields are at offset 0, and are themselves "integer 3423 // like". 
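// For illustration, with hypothetical types:
//   struct A { short s; };        // integer-like: one field at offset 0
//   struct B { char c; char d; }; // not integer-like: d is not at offset 0
// The offset and bit-field checks below implement these rules.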
3424 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3425 3426 bool HadField = false; 3427 unsigned idx = 0; 3428 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3429 i != e; ++i, ++idx) { 3430 const FieldDecl *FD = *i; 3431 3432 // Bit-fields are not addressable, we only need to verify they are "integer 3433 // like". We still have to disallow a subsequent non-bitfield, for example: 3434 // struct { int : 0; int x } 3435 // is non-integer like according to gcc. 3436 if (FD->isBitField()) { 3437 if (!RD->isUnion()) 3438 HadField = true; 3439 3440 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3441 return false; 3442 3443 continue; 3444 } 3445 3446 // Check if this field is at offset 0. 3447 if (Layout.getFieldOffset(idx) != 0) 3448 return false; 3449 3450 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3451 return false; 3452 3453 // Only allow at most one field in a structure. This doesn't match the 3454 // wording above, but follows gcc in situations with a field following an 3455 // empty structure. 3456 if (!RD->isUnion()) { 3457 if (HadField) 3458 return false; 3459 3460 HadField = true; 3461 } 3462 } 3463 3464 return true; 3465} 3466 3467ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 3468 if (RetTy->isVoidType()) 3469 return ABIArgInfo::getIgnore(); 3470 3471 // Large vector types should be returned via memory. 3472 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3473 return ABIArgInfo::getIndirect(0); 3474 3475 if (!isAggregateTypeForABI(RetTy)) { 3476 // Treat an enum type as its underlying type. 3477 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3478 RetTy = EnumTy->getDecl()->getIntegerType(); 3479 3480 return (RetTy->isPromotableIntegerType() ? 3481 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3482 } 3483 3484 // Structures with either a non-trivial destructor or a non-trivial 3485 // copy constructor are always indirect. 3486 if (isRecordReturnIndirect(RetTy, CGT)) 3487 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3488 3489 // Are we following APCS? 3490 if (getABIKind() == APCS) { 3491 if (isEmptyRecord(getContext(), RetTy, false)) 3492 return ABIArgInfo::getIgnore(); 3493 3494 // Complex types are all returned as packed integers. 3495 // 3496 // FIXME: Consider using 2 x vector types if the back end handles them 3497 // correctly. 3498 if (RetTy->isAnyComplexType()) 3499 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3500 getContext().getTypeSize(RetTy))); 3501 3502 // Integer like structures are returned in r0. 3503 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 3504 // Return in the smallest viable integer type. 3505 uint64_t Size = getContext().getTypeSize(RetTy); 3506 if (Size <= 8) 3507 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3508 if (Size <= 16) 3509 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3510 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3511 } 3512 3513 // Otherwise return in memory. 3514 return ABIArgInfo::getIndirect(0); 3515 } 3516 3517 // Otherwise this is an AAPCS variant. 3518 3519 if (isEmptyRecord(getContext(), RetTy, true)) 3520 return ABIArgInfo::getIgnore(); 3521 3522 // Check for homogeneous aggregates with AAPCS-VFP. 
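// e.g. a hypothetical struct Pair { double x, y; }; is a homogeneous
// aggregate with Base = double and two members, so under AAPCS-VFP it is
// returned directly (in VFP registers) instead of through memory.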
3523  if (getABIKind() == AAPCS_VFP) { 3524    const Type *Base = 0; 3525    if (isHomogeneousAggregate(RetTy, Base, getContext())) { 3526      assert(Base && "Base class should be set for homogeneous aggregate"); 3527      // Homogeneous Aggregates are returned directly. 3528      return ABIArgInfo::getDirect(); 3529    } 3530  } 3531 3532  // Aggregates <= 4 bytes are returned in r0; other aggregates 3533  // are returned indirectly. 3534  uint64_t Size = getContext().getTypeSize(RetTy); 3535  if (Size <= 32) { 3536    // Return in the smallest viable integer type. 3537    if (Size <= 8) 3538      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3539    if (Size <= 16) 3540      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3541    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3542  } 3543 3544  return ABIArgInfo::getIndirect(0); 3545} 3546 3547/// isIllegalVectorType - check whether Ty is an illegal vector type. 3548bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 3549  if (const VectorType *VT = Ty->getAs<VectorType>()) { 3550    // Check whether VT is legal. 3551    unsigned NumElements = VT->getNumElements(); 3552    uint64_t Size = getContext().getTypeSize(VT); 3553    // NumElements should be a power of 2. 3554    if ((NumElements & (NumElements - 1)) != 0) 3555      return true; 3556    // Size should be greater than 32 bits; smaller vectors are illegal. 3557    return Size <= 32; 3558  } 3559  return false; 3560} 3561 3562llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3563                                   CodeGenFunction &CGF) const { 3564  llvm::Type *BP = CGF.Int8PtrTy; 3565  llvm::Type *BPP = CGF.Int8PtrPtrTy; 3566 3567  CGBuilderTy &Builder = CGF.Builder; 3568  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3569  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3570 3571  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 3572  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 3573  bool IsIndirect = false; 3574 3575  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 3576  // APCS. For AAPCS, the ABI alignment is at least 4 bytes and at most 8 bytes. 3577  if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3578      getABIKind() == ARMABIInfo::AAPCS) 3579    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3580  else 3581    TyAlign = 4; 3582  // Use indirect if the size of the illegal vector is bigger than 16 bytes. 3583  if (isIllegalVectorType(Ty) && Size > 16) { 3584    IsIndirect = true; 3585    Size = 4; 3586    TyAlign = 4; 3587  } 3588 3589  // Handle address alignment for ABI alignment > 4 bytes. 3590  if (TyAlign > 4) { 3591    assert((TyAlign & (TyAlign - 1)) == 0 && 3592           "Alignment is not a power of 2!"); 3593    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 3594    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 3595    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 3596    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3597  } 3598 3599  uint64_t Offset = 3600    llvm::RoundUpToAlignment(Size, 4); 3601  llvm::Value *NextAddr = 3602    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3603                      "ap.next"); 3604  Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3605 3606  if (IsIndirect) 3607    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 3608  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { 3609    // We can't directly cast ap.cur to a pointer to a vector type, since ap.cur 3610    // may not be correctly aligned for the vector type.
We create an aligned 3611    // temporary space and copy the content over from ap.cur to the temporary 3612    // space. This is necessary if the natural alignment of the type is greater 3613    // than the ABI alignment. 3614    llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 3615    CharUnits CharSize = getContext().getTypeSizeInChars(Ty); 3616    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty), 3617                                                    "var.align"); 3618    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 3619    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy); 3620    Builder.CreateMemCpy(Dst, Src, 3621        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()), 3622        TyAlign, false); 3623    Addr = AlignedTemp; // The content is now in the aligned location. 3624  } 3625  llvm::Type *PTy = 3626    llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3627  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3628 3629  return AddrTyped; 3630} 3631 3632namespace { 3633 3634class NaClARMABIInfo : public ABIInfo { 3635 public: 3636  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3637      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {} 3638  virtual void computeInfo(CGFunctionInfo &FI) const; 3639  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3640                                 CodeGenFunction &CGF) const; 3641 private: 3642  PNaClABIInfo PInfo; // Used for generating calls with the pnaclcall calling convention. 3643  ARMABIInfo NInfo; // Used for everything else. 3644}; 3645 3646class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo { 3647 public: 3648  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3649      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {} 3650}; 3651 3652} 3653 3654void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3655  if (FI.getASTCallingConvention() == CC_PnaclCall) 3656    PInfo.computeInfo(FI); 3657  else 3658    static_cast<const ABIInfo&>(NInfo).computeInfo(FI); 3659} 3660 3661llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3662                                       CodeGenFunction &CGF) const { 3663  // Always use the native convention; calling pnacl-style varargs functions 3664  // is unsupported. 3665  return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF); 3666} 3667 3668//===----------------------------------------------------------------------===// 3669// AArch64 ABI Implementation 3670//===----------------------------------------------------------------------===// 3671 3672namespace { 3673 3674class AArch64ABIInfo : public ABIInfo { 3675public: 3676  AArch64ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3677 3678private: 3679  // The AArch64 PCS is explicit about return types and argument types being 3680  // handled identically, so we don't need to draw a distinction between 3681  // Argument and Return classification.
3682  ABIArgInfo classifyGenericType(QualType Ty, int &FreeIntRegs, 3683                                 int &FreeVFPRegs) const; 3684 3685  ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt, 3686                        llvm::Type *DirectTy = 0) const; 3687 3688  virtual void computeInfo(CGFunctionInfo &FI) const; 3689 3690  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3691                                 CodeGenFunction &CGF) const; 3692}; 3693 3694class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 3695public: 3696  AArch64TargetCodeGenInfo(CodeGenTypes &CGT) 3697    :TargetCodeGenInfo(new AArch64ABIInfo(CGT)) {} 3698 3699  const AArch64ABIInfo &getABIInfo() const { 3700    return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 3701  } 3702 3703  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 3704    return 31; 3705  } 3706 3707  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3708                               llvm::Value *Address) const { 3709    // 0-31 are x0-x30 and sp: 8 bytes each 3710    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 3711    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31); 3712 3713    // 64-95 are v0-v31: 16 bytes each 3714    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 3715    AssignToArrayRange(CGF.Builder, Address, Sixteen8, 64, 95); 3716 3717    return false; 3718  } 3719 3720}; 3721 3722} 3723 3724void AArch64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3725  int FreeIntRegs = 8, FreeVFPRegs = 8; 3726 3727  FI.getReturnInfo() = classifyGenericType(FI.getReturnType(), 3728                                           FreeIntRegs, FreeVFPRegs); 3729 3730  FreeIntRegs = FreeVFPRegs = 8; 3731  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3732       it != ie; ++it) { 3733    it->info = classifyGenericType(it->type, FreeIntRegs, FreeVFPRegs); 3734 3735  } 3736} 3737 3738ABIArgInfo 3739AArch64ABIInfo::tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, 3740                           bool IsInt, llvm::Type *DirectTy) const { 3741  if (FreeRegs >= RegsNeeded) { 3742    FreeRegs -= RegsNeeded; 3743    return ABIArgInfo::getDirect(DirectTy); 3744  } 3745 3746  llvm::Type *Padding = 0; 3747 3748  // We need padding so that later arguments don't get put in registers anyway. 3749  // That wouldn't happen if only ByVal arguments followed in the same category, 3750  // but a large structure will simply seem to be a pointer as far as LLVM is 3751  // concerned. 3752  if (FreeRegs > 0) { 3753    if (IsInt) 3754      Padding = llvm::Type::getInt64Ty(getVMContext()); 3755    else 3756      Padding = llvm::Type::getFloatTy(getVMContext()); 3757 3758    // Either [N x i64] or [N x float]. 3759    Padding = llvm::ArrayType::get(Padding, FreeRegs); 3760    FreeRegs = 0; 3761  } 3762 3763  return ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty) / 8, 3764                                 /*IsByVal=*/ true, /*Realign=*/ false, 3765                                 Padding); 3766} 3767 3768 3769ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty, 3770                                               int &FreeIntRegs, 3771                                               int &FreeVFPRegs) const { 3772  // Can only occur for a return type, but harmless otherwise. 3773  if (Ty->isVoidType()) 3774    return ABIArgInfo::getIgnore(); 3775 3776  // Large vector types should be returned via memory. There's no such concept 3777  // in the ABI, but they'd be over 16 bytes anyway so no matter how they're 3778  // classified they'd go into memory (see B.3). 3779  if (Ty->isVectorType() && getContext().getTypeSize(Ty) > 128) { 3780    if (FreeIntRegs > 0) 3781      --FreeIntRegs; 3782    return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3783  } 3784 3785  // All non-aggregate LLVM types have a concrete ABI representation so they can 3786  // be passed directly.
After this block we're guaranteed to be in a 3787  // complicated case. 3788  if (!isAggregateTypeForABI(Ty)) { 3789    // Treat an enum type as its underlying type. 3790    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3791      Ty = EnumTy->getDecl()->getIntegerType(); 3792 3793    if (Ty->isFloatingType() || Ty->isVectorType()) 3794      return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ false); 3795 3796    assert(getContext().getTypeSize(Ty) <= 128 && 3797           "unexpectedly large scalar type"); 3798 3799    int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1; 3800 3801    // If the type may need padding registers to ensure "alignment", we must be 3802    // careful when this is accounted for. Increasing the effective size covers 3803    // all cases. 3804    if (getContext().getTypeAlign(Ty) == 128) 3805      RegsNeeded += FreeIntRegs % 2 != 0; 3806 3807    return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true); 3808  } 3809 3810  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) { 3811    if (FreeIntRegs > 0 && RAA == CGCXXABI::RAA_Indirect) 3812      --FreeIntRegs; 3813    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 3814  } 3815 3816  if (isEmptyRecord(getContext(), Ty, true)) { 3817    if (!getContext().getLangOpts().CPlusPlus) { 3818      // Empty structs outside C++ mode are a GNU extension, so no ABI can 3819      // possibly tell us what to do. It turns out (I believe) that GCC ignores 3820      // the object for parameter-passing purposes. 3821      return ABIArgInfo::getIgnore(); 3822    } 3823 3824    // The combination of C++98 9p5 (sizeof(struct) != 0) and the pseudocode 3825    // description of va_arg in the PCS requires that an empty struct does 3826    // actually occupy space for parameter-passing. I'm hoping for a 3827    // clarification giving an explicit paragraph to point to in future. 3828    return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ true, 3829                      llvm::Type::getInt8Ty(getVMContext())); 3830  } 3831 3832  // Homogeneous floating-point and short-vector aggregates get passed in registers or on the stack. 3833  const Type *Base = 0; 3834  uint64_t NumMembers = 0; 3835  if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)) { 3836    assert(Base && "Base class should be set for homogeneous aggregate"); 3837    // Homogeneous aggregates are passed and returned directly. 3838    return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ NumMembers, 3839                      /*IsInt=*/ false); 3840  } 3841 3842  uint64_t Size = getContext().getTypeSize(Ty); 3843  if (Size <= 128) { 3844    // Small structs can use the same direct type whether they're in registers 3845    // or on the stack. 3846    llvm::Type *BaseTy; 3847    unsigned NumBases; 3848    int SizeInRegs = (Size + 63) / 64; 3849 3850    if (getContext().getTypeAlign(Ty) == 128) { 3851      BaseTy = llvm::Type::getIntNTy(getVMContext(), 128); 3852      NumBases = 1; 3853 3854      // If the type may need padding registers to ensure "alignment", we must 3855      // be careful when this is accounted for. Increasing the effective size 3856      // covers all cases. 3857      SizeInRegs += FreeIntRegs % 2 != 0; 3858    } else { 3859      BaseTy = llvm::Type::getInt64Ty(getVMContext()); 3860      NumBases = SizeInRegs; 3861    } 3862    llvm::Type *DirectTy = llvm::ArrayType::get(BaseTy, NumBases); 3863 3864    return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ SizeInRegs, 3865                      /*IsInt=*/ true, DirectTy); 3866  } 3867 3868  // If the aggregate is > 16 bytes, it's passed and returned indirectly. In 3869  // LLVM terms the return uses an "sret" pointer, but that's handled elsewhere.
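// e.g. a hypothetical struct Big { long v[3]; }; is 24 bytes, so the
// caller passes a pointer to a copy; that pointer itself consumes one
// general-purpose register, hence the decrement below.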
3870  --FreeIntRegs; 3871  return ABIArgInfo::getIndirect(0, /*ByVal=*/ false); 3872} 3873 3874llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3875                                       CodeGenFunction &CGF) const { 3876  // The AArch64 va_list type and handling are specified in the Procedure Call 3877  // Standard, section B.4: 3878  // 3879  // struct { 3880  //   void *__stack; 3881  //   void *__gr_top; 3882  //   void *__vr_top; 3883  //   int __gr_offs; 3884  //   int __vr_offs; 3885  // }; 3886 3887  assert(!CGF.CGM.getDataLayout().isBigEndian() 3888         && "va_arg not implemented for big-endian AArch64"); 3889 3890  int FreeIntRegs = 8, FreeVFPRegs = 8; 3891  Ty = CGF.getContext().getCanonicalType(Ty); 3892  ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs); 3893 3894  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 3895  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 3896  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 3897  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 3898 3899  llvm::Value *reg_offs_p = 0, *reg_offs = 0; 3900  int reg_top_index; 3901  int RegSize; 3902  if (FreeIntRegs < 8) { 3903    assert(FreeVFPRegs == 8 && "Arguments never split between int & VFP regs"); 3904    // 3 is the field number of __gr_offs. 3905    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); 3906    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 3907    reg_top_index = 1; // field number for __gr_top 3908    RegSize = 8 * (8 - FreeIntRegs); 3909  } else { 3910    assert(FreeVFPRegs < 8 && "Argument must go in VFP or int regs"); 3911    // 4 is the field number of __vr_offs. 3912    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); 3913    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 3914    reg_top_index = 2; // field number for __vr_top 3915    RegSize = 16 * (8 - FreeVFPRegs); 3916  } 3917 3918  //======================================= 3919  // Find out where argument was passed 3920  //======================================= 3921 3922  // If reg_offs >= 0 we're already using the stack for this type of 3923  // argument. We don't want to keep updating reg_offs (in case it overflows, 3924  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 3925  // whatever they get). 3926  llvm::Value *UsingStack = 0; 3927  UsingStack = CGF.Builder.CreateICmpSGE(reg_offs, 3928                                         llvm::ConstantInt::get(CGF.Int32Ty, 0)); 3929 3930  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); 3931 3932  // Otherwise, at least some kind of argument could go in these registers; the 3933  // question is whether this particular type is too big. 3934  CGF.EmitBlock(MaybeRegBlock); 3935 3936  // Integer arguments may need their register alignment corrected (for example a 3937  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we 3938  // align __gr_offs to calculate the potential address. 3939  if (FreeIntRegs < 8 && AI.isDirect() && getContext().getTypeAlign(Ty) > 64) { 3940    int Align = getContext().getTypeAlign(Ty) / 8; 3941 3942    reg_offs = CGF.Builder.CreateAdd(reg_offs, 3943                                 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 3944                                 "align_regoffs"); 3945    reg_offs = CGF.Builder.CreateAnd(reg_offs, 3946                                    llvm::ConstantInt::get(CGF.Int32Ty, -Align), 3947                                    "aligned_regoffs"); 3948  } 3949 3950  // Update the gr_offs/vr_offs pointer for the next call to va_arg on this va_list.
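// (Sketch of the PCS B.4 scheme: __gr_offs and __vr_offs start out
// negative, e.g. -64 when all 8 GPRs may hold variadic arguments, and grow
// towards zero; an argument was passed in registers iff the incremented
// offset is still <= 0, which is the test emitted below.)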
3951  llvm::Value *NewOffset = 0; 3952  NewOffset = CGF.Builder.CreateAdd(reg_offs, 3953                                    llvm::ConstantInt::get(CGF.Int32Ty, RegSize), 3954                                    "new_reg_offs"); 3955  CGF.Builder.CreateStore(NewOffset, reg_offs_p); 3956 3957  // Now we're in a position to decide whether this argument really was in 3958  // registers or not. 3959  llvm::Value *InRegs = 0; 3960  InRegs = CGF.Builder.CreateICmpSLE(NewOffset, 3961                                     llvm::ConstantInt::get(CGF.Int32Ty, 0), 3962                                     "inreg"); 3963 3964  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 3965 3966  //======================================= 3967  // Argument was in registers 3968  //======================================= 3969 3970  // Now we emit the code for the case where the argument was originally passed 3971  // in registers. First start the appropriate block: 3972  CGF.EmitBlock(InRegBlock); 3973 3974  llvm::Value *reg_top_p = 0, *reg_top = 0; 3975  reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); 3976  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 3977  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs); 3978  llvm::Value *RegAddr = 0; 3979  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 3980 3981  if (!AI.isDirect()) { 3982    // If it's been passed indirectly (actually a struct), whatever we find in 3983    // the saved registers or on the stack will actually be a struct **. 3984    MemTy = llvm::PointerType::getUnqual(MemTy); 3985  } 3986 3987  const Type *Base = 0; 3988  uint64_t NumMembers; 3989  if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers) 3990      && NumMembers > 1) { 3991    // Homogeneous aggregates passed in registers will have their elements split 3992    // and stored 16 bytes apart regardless of size (they're notionally in qN, 3993    // qN+1, ...). We reload and store into a temporary local variable 3994    // contiguously. 3995    assert(AI.isDirect() && "Homogeneous aggregates should be passed directly"); 3996    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 3997    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 3998    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy); 3999 4000    for (unsigned i = 0; i < NumMembers; ++i) { 4001      llvm::Value *BaseOffset = llvm::ConstantInt::get(CGF.Int32Ty, 16 * i); 4002      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset); 4003      LoadAddr = CGF.Builder.CreateBitCast(LoadAddr, 4004                                           llvm::PointerType::getUnqual(BaseTy)); 4005      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i); 4006 4007      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 4008      CGF.Builder.CreateStore(Elem, StoreAddr); 4009    } 4010 4011    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy); 4012  } else { 4013    // Otherwise the object is contiguous in memory. 4014    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy); 4015  } 4016 4017  CGF.EmitBranch(ContBlock); 4018 4019  //======================================= 4020  // Argument was on the stack 4021  //======================================= 4022  CGF.EmitBlock(OnStackBlock); 4023 4024  llvm::Value *stack_p = 0, *OnStackAddr = 0; 4025  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p"); 4026  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack"); 4027 4028  // Again, stack arguments may need realignment. In this case both integer and 4029  // floating-point ones might be affected.
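// (For example, a hypothetical struct { __int128 a; } has 16-byte
// alignment, so its stack slot must first be rounded up to a 16-byte
// boundary; the pointer arithmetic below performs that rounding.)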
4030 if (AI.isDirect() && getContext().getTypeAlign(Ty) > 64) { 4031 int Align = getContext().getTypeAlign(Ty) / 8; 4032 4033 OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty); 4034 4035 OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr, 4036 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 4037 "align_stack"); 4038 OnStackAddr = CGF.Builder.CreateAnd(OnStackAddr, 4039 llvm::ConstantInt::get(CGF.Int64Ty, -Align), 4040 "align_stack"); 4041 4042 OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy); 4043 } 4044 4045 uint64_t StackSize; 4046 if (AI.isDirect()) 4047 StackSize = getContext().getTypeSize(Ty) / 8; 4048 else 4049 StackSize = 8; 4050 4051 // All stack slots are 8 bytes 4052 StackSize = llvm::RoundUpToAlignment(StackSize, 8); 4053 4054 llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize); 4055 llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, 4056 "new_stack"); 4057 4058 // Write the new value of __stack for the next call to va_arg 4059 CGF.Builder.CreateStore(NewStack, stack_p); 4060 4061 OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy); 4062 4063 CGF.EmitBranch(ContBlock); 4064 4065 //======================================= 4066 // Tidy up 4067 //======================================= 4068 CGF.EmitBlock(ContBlock); 4069 4070 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr"); 4071 ResAddr->addIncoming(RegAddr, InRegBlock); 4072 ResAddr->addIncoming(OnStackAddr, OnStackBlock); 4073 4074 if (AI.isDirect()) 4075 return ResAddr; 4076 4077 return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"); 4078} 4079 4080//===----------------------------------------------------------------------===// 4081// NVPTX ABI Implementation 4082//===----------------------------------------------------------------------===// 4083 4084namespace { 4085 4086class NVPTXABIInfo : public ABIInfo { 4087public: 4088 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4089 4090 ABIArgInfo classifyReturnType(QualType RetTy) const; 4091 ABIArgInfo classifyArgumentType(QualType Ty) const; 4092 4093 virtual void computeInfo(CGFunctionInfo &FI) const; 4094 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4095 CodeGenFunction &CFG) const; 4096}; 4097 4098class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 4099public: 4100 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 4101 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 4102 4103 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4104 CodeGen::CodeGenModule &M) const; 4105private: 4106 static void addKernelMetadata(llvm::Function *F); 4107}; 4108 4109ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 4110 if (RetTy->isVoidType()) 4111 return ABIArgInfo::getIgnore(); 4112 if (isAggregateTypeForABI(RetTy)) 4113 return ABIArgInfo::getIndirect(0); 4114 return ABIArgInfo::getDirect(); 4115} 4116 4117ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 4118 if (isAggregateTypeForABI(Ty)) 4119 return ABIArgInfo::getIndirect(0); 4120 4121 return ABIArgInfo::getDirect(); 4122} 4123 4124void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 4125 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4126 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4127 it != ie; ++it) 4128 it->info = classifyArgumentType(it->type); 4129 4130 // Always honor user-specified calling convention. 
4131 if (FI.getCallingConvention() != llvm::CallingConv::C) 4132 return; 4133 4134 FI.setEffectiveCallingConvention(getRuntimeCC()); 4135} 4136 4137llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4138 CodeGenFunction &CFG) const { 4139 llvm_unreachable("NVPTX does not support varargs"); 4140} 4141 4142void NVPTXTargetCodeGenInfo:: 4143SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4144 CodeGen::CodeGenModule &M) const{ 4145 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4146 if (!FD) return; 4147 4148 llvm::Function *F = cast<llvm::Function>(GV); 4149 4150 // Perform special handling in OpenCL mode 4151 if (M.getLangOpts().OpenCL) { 4152 // Use OpenCL function attributes to check for kernel functions 4153 // By default, all functions are device functions 4154 if (FD->hasAttr<OpenCLKernelAttr>()) { 4155 // OpenCL __kernel functions get kernel metadata 4156 addKernelMetadata(F); 4157 // And kernel functions are not subject to inlining 4158 F->addFnAttr(llvm::Attribute::NoInline); 4159 } 4160 } 4161 4162 // Perform special handling in CUDA mode. 4163 if (M.getLangOpts().CUDA) { 4164 // CUDA __global__ functions get a kernel metadata entry. Since 4165 // __global__ functions cannot be called from the device, we do not 4166 // need to set the noinline attribute. 4167 if (FD->getAttr<CUDAGlobalAttr>()) 4168 addKernelMetadata(F); 4169 } 4170} 4171 4172void NVPTXTargetCodeGenInfo::addKernelMetadata(llvm::Function *F) { 4173 llvm::Module *M = F->getParent(); 4174 llvm::LLVMContext &Ctx = M->getContext(); 4175 4176 // Get "nvvm.annotations" metadata node 4177 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 4178 4179 // Create !{<func-ref>, metadata !"kernel", i32 1} node 4180 llvm::SmallVector<llvm::Value *, 3> MDVals; 4181 MDVals.push_back(F); 4182 MDVals.push_back(llvm::MDString::get(Ctx, "kernel")); 4183 MDVals.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1)); 4184 4185 // Append metadata to nvvm.annotations 4186 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 4187} 4188 4189} 4190 4191//===----------------------------------------------------------------------===// 4192// SystemZ ABI Implementation 4193//===----------------------------------------------------------------------===// 4194 4195namespace { 4196 4197class SystemZABIInfo : public ABIInfo { 4198public: 4199 SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4200 4201 bool isPromotableIntegerType(QualType Ty) const; 4202 bool isCompoundType(QualType Ty) const; 4203 bool isFPArgumentType(QualType Ty) const; 4204 4205 ABIArgInfo classifyReturnType(QualType RetTy) const; 4206 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 4207 4208 virtual void computeInfo(CGFunctionInfo &FI) const { 4209 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4210 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4211 it != ie; ++it) 4212 it->info = classifyArgumentType(it->type); 4213 } 4214 4215 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4216 CodeGenFunction &CGF) const; 4217}; 4218 4219class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 4220public: 4221 SystemZTargetCodeGenInfo(CodeGenTypes &CGT) 4222 : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {} 4223}; 4224 4225} 4226 4227bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 4228 // Treat an enum type as its underlying type. 
4229 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4230 Ty = EnumTy->getDecl()->getIntegerType(); 4231 4232 // Promotable integer types are required to be promoted by the ABI. 4233 if (Ty->isPromotableIntegerType()) 4234 return true; 4235 4236 // 32-bit values must also be promoted. 4237 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4238 switch (BT->getKind()) { 4239 case BuiltinType::Int: 4240 case BuiltinType::UInt: 4241 return true; 4242 default: 4243 return false; 4244 } 4245 return false; 4246} 4247 4248bool SystemZABIInfo::isCompoundType(QualType Ty) const { 4249 return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty); 4250} 4251 4252bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 4253 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4254 switch (BT->getKind()) { 4255 case BuiltinType::Float: 4256 case BuiltinType::Double: 4257 return true; 4258 default: 4259 return false; 4260 } 4261 4262 if (const RecordType *RT = Ty->getAsStructureType()) { 4263 const RecordDecl *RD = RT->getDecl(); 4264 bool Found = false; 4265 4266 // If this is a C++ record, check the bases first. 4267 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 4268 for (CXXRecordDecl::base_class_const_iterator I = CXXRD->bases_begin(), 4269 E = CXXRD->bases_end(); I != E; ++I) { 4270 QualType Base = I->getType(); 4271 4272 // Empty bases don't affect things either way. 4273 if (isEmptyRecord(getContext(), Base, true)) 4274 continue; 4275 4276 if (Found) 4277 return false; 4278 Found = isFPArgumentType(Base); 4279 if (!Found) 4280 return false; 4281 } 4282 4283 // Check the fields. 4284 for (RecordDecl::field_iterator I = RD->field_begin(), 4285 E = RD->field_end(); I != E; ++I) { 4286 const FieldDecl *FD = *I; 4287 4288 // Empty bitfields don't affect things either way. 4289 // Unlike isSingleElementStruct(), empty structure and array fields 4290 // do count. So do anonymous bitfields that aren't zero-sized. 4291 if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 4292 return true; 4293 4294 // Unlike isSingleElementStruct(), arrays do not count. 4295 // Nested isFPArgumentType structures still do though. 4296 if (Found) 4297 return false; 4298 Found = isFPArgumentType(FD->getType()); 4299 if (!Found) 4300 return false; 4301 } 4302 4303 // Unlike isSingleElementStruct(), trailing padding is allowed. 4304 // An 8-byte aligned struct s { float f; } is passed as a double. 4305 return Found; 4306 } 4307 4308 return false; 4309} 4310 4311llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4312 CodeGenFunction &CGF) const { 4313 // Assume that va_list type is correct; should be pointer to LLVM type: 4314 // struct { 4315 // i64 __gpr; 4316 // i64 __fpr; 4317 // i8 *__overflow_arg_area; 4318 // i8 *__reg_save_area; 4319 // }; 4320 4321 // Every argument occupies 8 bytes and is passed by preference in either 4322 // GPRs or FPRs. 
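// For illustration (hypothetical call): an int argument occupies the
// rightmost 4 bytes of its 8-byte slot on big-endian SystemZ, so Padding
// below is 4 and the value is loaded from slot+4; a double fills the whole
// slot, so Padding is 0.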
4323 Ty = CGF.getContext().getCanonicalType(Ty); 4324 ABIArgInfo AI = classifyArgumentType(Ty); 4325 bool InFPRs = isFPArgumentType(Ty); 4326 4327 llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 4328 bool IsIndirect = AI.isIndirect(); 4329 unsigned UnpaddedBitSize; 4330 if (IsIndirect) { 4331 APTy = llvm::PointerType::getUnqual(APTy); 4332 UnpaddedBitSize = 64; 4333 } else 4334 UnpaddedBitSize = getContext().getTypeSize(Ty); 4335 unsigned PaddedBitSize = 64; 4336 assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size."); 4337 4338 unsigned PaddedSize = PaddedBitSize / 8; 4339 unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8; 4340 4341 unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding; 4342 if (InFPRs) { 4343 MaxRegs = 4; // Maximum of 4 FPR arguments 4344 RegCountField = 1; // __fpr 4345 RegSaveIndex = 16; // save offset for f0 4346 RegPadding = 0; // floats are passed in the high bits of an FPR 4347 } else { 4348 MaxRegs = 5; // Maximum of 5 GPR arguments 4349 RegCountField = 0; // __gpr 4350 RegSaveIndex = 2; // save offset for r2 4351 RegPadding = Padding; // values are passed in the low bits of a GPR 4352 } 4353 4354 llvm::Value *RegCountPtr = 4355 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); 4356 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 4357 llvm::Type *IndexTy = RegCount->getType(); 4358 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 4359 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 4360 "fits_in_regs"); 4361 4362 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 4363 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 4364 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 4365 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 4366 4367 // Emit code to load the value if it was passed in registers. 4368 CGF.EmitBlock(InRegBlock); 4369 4370 // Work out the address of an argument register. 4371 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize); 4372 llvm::Value *ScaledRegCount = 4373 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 4374 llvm::Value *RegBase = 4375 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding); 4376 llvm::Value *RegOffset = 4377 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 4378 llvm::Value *RegSaveAreaPtr = 4379 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); 4380 llvm::Value *RegSaveArea = 4381 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 4382 llvm::Value *RawRegAddr = 4383 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr"); 4384 llvm::Value *RegAddr = 4385 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr"); 4386 4387 // Update the register count 4388 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 4389 llvm::Value *NewRegCount = 4390 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 4391 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 4392 CGF.EmitBranch(ContBlock); 4393 4394 // Emit code to load the value if it was passed in memory. 4395 CGF.EmitBlock(InMemBlock); 4396 4397 // Work out the address of a stack argument. 
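// (Each overflow slot is PaddedSize = 8 bytes wide regardless of the
// argument type, so the overflow pointer below always advances by a full
// slot.)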
4398  llvm::Value *OverflowArgAreaPtr = 4399    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 4400  llvm::Value *OverflowArgArea = 4401    CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"); 4402  llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding); 4403  llvm::Value *RawMemAddr = 4404    CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr"); 4405  llvm::Value *MemAddr = 4406    CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr"); 4407 4408  // Update the overflow_arg_area pointer. 4409  llvm::Value *NewOverflowArgArea = 4410    CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area"); 4411  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 4412  CGF.EmitBranch(ContBlock); 4413 4414  // Return the appropriate result. 4415  CGF.EmitBlock(ContBlock); 4416  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr"); 4417  ResAddr->addIncoming(RegAddr, InRegBlock); 4418  ResAddr->addIncoming(MemAddr, InMemBlock); 4419 4420  if (IsIndirect) 4421    return CGF.Builder.CreateLoad(ResAddr, "indirect_arg"); 4422 4423  return ResAddr; 4424} 4425 4426 4427ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 4428  if (RetTy->isVoidType()) 4429    return ABIArgInfo::getIgnore(); 4430  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 4431    return ABIArgInfo::getIndirect(0); 4432  return (isPromotableIntegerType(RetTy) ? 4433          ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4434} 4435 4436ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 4437  // Handle the generic C++ ABI. 4438  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) 4439    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 4440 4441  // Integers and enums are extended to full register width. 4442  if (isPromotableIntegerType(Ty)) 4443    return ABIArgInfo::getExtend(); 4444 4445  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 4446  uint64_t Size = getContext().getTypeSize(Ty); 4447  if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 4448    return ABIArgInfo::getIndirect(0); 4449 4450  // Handle small structures. 4451  if (const RecordType *RT = Ty->getAs<RecordType>()) { 4452    // Structures with flexible arrays have variable length, so they really 4453    // fail the size test above. 4454    const RecordDecl *RD = RT->getDecl(); 4455    if (RD->hasFlexibleArrayMember()) 4456      return ABIArgInfo::getIndirect(0); 4457 4458    // The structure is passed as an unextended integer, a float, or a double. 4459    llvm::Type *PassTy; 4460    if (isFPArgumentType(Ty)) { 4461      assert(Size == 32 || Size == 64); 4462      if (Size == 32) 4463        PassTy = llvm::Type::getFloatTy(getVMContext()); 4464      else 4465        PassTy = llvm::Type::getDoubleTy(getVMContext()); 4466    } else 4467      PassTy = llvm::IntegerType::get(getVMContext(), Size); 4468    return ABIArgInfo::getDirect(PassTy); 4469  } 4470 4471  // Non-structure compounds are passed indirectly.
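// e.g. a _Complex float is 8 bytes and therefore survives the size check
// above, but it is a compound type rather than a record, so it ends up
// here and is passed indirectly.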
4472 if (isCompoundType(Ty)) 4473 return ABIArgInfo::getIndirect(0); 4474 4475 return ABIArgInfo::getDirect(0); 4476} 4477 4478//===----------------------------------------------------------------------===// 4479// MBlaze ABI Implementation 4480//===----------------------------------------------------------------------===// 4481 4482namespace { 4483 4484class MBlazeABIInfo : public ABIInfo { 4485public: 4486 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4487 4488 bool isPromotableIntegerType(QualType Ty) const; 4489 4490 ABIArgInfo classifyReturnType(QualType RetTy) const; 4491 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4492 4493 virtual void computeInfo(CGFunctionInfo &FI) const { 4494 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4495 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4496 it != ie; ++it) 4497 it->info = classifyArgumentType(it->type); 4498 } 4499 4500 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4501 CodeGenFunction &CGF) const; 4502}; 4503 4504class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 4505public: 4506 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 4507 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 4508 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4509 CodeGen::CodeGenModule &M) const; 4510}; 4511 4512} 4513 4514bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 4515 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 4516 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4517 switch (BT->getKind()) { 4518 case BuiltinType::Bool: 4519 case BuiltinType::Char_S: 4520 case BuiltinType::Char_U: 4521 case BuiltinType::SChar: 4522 case BuiltinType::UChar: 4523 case BuiltinType::Short: 4524 case BuiltinType::UShort: 4525 return true; 4526 default: 4527 return false; 4528 } 4529 return false; 4530} 4531 4532llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4533 CodeGenFunction &CGF) const { 4534 // FIXME: Implement 4535 return 0; 4536} 4537 4538 4539ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 4540 if (RetTy->isVoidType()) 4541 return ABIArgInfo::getIgnore(); 4542 if (isAggregateTypeForABI(RetTy)) 4543 return ABIArgInfo::getIndirect(0); 4544 4545 return (isPromotableIntegerType(RetTy) ? 4546 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4547} 4548 4549ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 4550 if (isAggregateTypeForABI(Ty)) 4551 return ABIArgInfo::getIndirect(0); 4552 4553 return (isPromotableIntegerType(Ty) ? 4554 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4555} 4556 4557void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 4558 llvm::GlobalValue *GV, 4559 CodeGen::CodeGenModule &M) 4560 const { 4561 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4562 if (!FD) return; 4563 4564 llvm::CallingConv::ID CC = llvm::CallingConv::C; 4565 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 4566 CC = llvm::CallingConv::MBLAZE_INTR; 4567 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 4568 CC = llvm::CallingConv::MBLAZE_SVOL; 4569 4570 if (CC != llvm::CallingConv::C) { 4571 // Handle 'interrupt_handler' attribute: 4572 llvm::Function *F = cast<llvm::Function>(GV); 4573 4574 // Step 1: Set ISR calling convention. 4575 F->setCallingConv(CC); 4576 4577 // Step 2: Add attributes goodness. 4578 F->addFnAttr(llvm::Attribute::NoInline); 4579 } 4580 4581 // Step 3: Emit _interrupt_handler alias. 
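// (For a hypothetical handler declared as
//   void isr(void) __attribute__((interrupt_handler));
// this makes the function reachable under the fixed name
// _interrupt_handler in addition to its own symbol.)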
4582 if (CC == llvm::CallingConv::MBLAZE_INTR) 4583 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 4584 "_interrupt_handler", GV, &M.getModule()); 4585} 4586 4587 4588//===----------------------------------------------------------------------===// 4589// MSP430 ABI Implementation 4590//===----------------------------------------------------------------------===// 4591 4592namespace { 4593 4594class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 4595public: 4596 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 4597 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 4598 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4599 CodeGen::CodeGenModule &M) const; 4600}; 4601 4602} 4603 4604void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 4605 llvm::GlobalValue *GV, 4606 CodeGen::CodeGenModule &M) const { 4607 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 4608 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 4609 // Handle 'interrupt' attribute: 4610 llvm::Function *F = cast<llvm::Function>(GV); 4611 4612 // Step 1: Set ISR calling convention. 4613 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 4614 4615 // Step 2: Add attributes goodness. 4616 F->addFnAttr(llvm::Attribute::NoInline); 4617 4618 // Step 3: Emit ISR vector alias. 4619 unsigned Num = attr->getNumber() / 2; 4620 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 4621 "__isr_" + Twine(Num), 4622 GV, &M.getModule()); 4623 } 4624 } 4625} 4626 4627//===----------------------------------------------------------------------===// 4628// MIPS ABI Implementation. This works for both little-endian and 4629// big-endian variants. 4630//===----------------------------------------------------------------------===// 4631 4632namespace { 4633class MipsABIInfo : public ABIInfo { 4634 bool IsO32; 4635 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 4636 void CoerceToIntArgs(uint64_t TySize, 4637 SmallVector<llvm::Type*, 8> &ArgList) const; 4638 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 4639 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 4640 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 4641public: 4642 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 4643 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 4644 StackAlignInBytes(IsO32 ? 8 : 16) {} 4645 4646 ABIArgInfo classifyReturnType(QualType RetTy) const; 4647 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 4648 virtual void computeInfo(CGFunctionInfo &FI) const; 4649 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4650 CodeGenFunction &CGF) const; 4651}; 4652 4653class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 4654 unsigned SizeOfUnwindException; 4655public: 4656 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 4657 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 4658 SizeOfUnwindException(IsO32 ? 
24 : 32) {} 4659 4660  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 4661    return 29; 4662  } 4663 4664  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4665                           CodeGen::CodeGenModule &CGM) const { 4666    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4667    if (!FD) return; 4668    llvm::Function *Fn = cast<llvm::Function>(GV); 4669    if (FD->hasAttr<Mips16Attr>()) { 4670      Fn->addFnAttr("mips16"); 4671    } 4672    else if (FD->hasAttr<NoMips16Attr>()) { 4673      Fn->addFnAttr("nomips16"); 4674    } 4675  } 4676 4677  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4678                               llvm::Value *Address) const; 4679 4680  unsigned getSizeOfUnwindException() const { 4681    return SizeOfUnwindException; 4682  } 4683}; 4684} 4685 4686void MipsABIInfo::CoerceToIntArgs(uint64_t TySize, 4687                                  SmallVector<llvm::Type*, 8> &ArgList) const { 4688  llvm::IntegerType *IntTy = 4689    llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 4690 4691  // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy; TySize is in bits. 4692  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 4693    ArgList.push_back(IntTy); 4694 4695  // If necessary, add one more integer type to ArgList. 4696  unsigned R = TySize % (MinABIStackAlignInBytes * 8); 4697 4698  if (R) 4699    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 4700} 4701 4702// In N32/64, an aligned double-precision floating-point field is passed in 4703// a register. 4704llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 4705  SmallVector<llvm::Type*, 8> ArgList, IntArgList; 4706 4707  if (IsO32) { 4708    CoerceToIntArgs(TySize, ArgList); 4709    return llvm::StructType::get(getVMContext(), ArgList); 4710  } 4711 4712  if (Ty->isComplexType()) 4713    return CGT.ConvertType(Ty); 4714 4715  const RecordType *RT = Ty->getAs<RecordType>(); 4716 4717  // Unions/vectors are passed in integer registers. 4718  if (!RT || !RT->isStructureOrClassType()) { 4719    CoerceToIntArgs(TySize, ArgList); 4720    return llvm::StructType::get(getVMContext(), ArgList); 4721  } 4722 4723  const RecordDecl *RD = RT->getDecl(); 4724  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 4725  assert(!(TySize % 8) && "Size of structure must be a multiple of 8."); 4726 4727  uint64_t LastOffset = 0; 4728  unsigned idx = 0; 4729  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 4730 4731  // Iterate over fields in the struct/class and check if there are any aligned 4732  // double fields. 4733  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 4734       i != e; ++i, ++idx) { 4735    const QualType Ty = i->getType(); 4736    const BuiltinType *BT = Ty->getAs<BuiltinType>(); 4737 4738    if (!BT || BT->getKind() != BuiltinType::Double) 4739      continue; 4740 4741    uint64_t Offset = Layout.getFieldOffset(idx); 4742    if (Offset % 64) // Ignore doubles that are not aligned. 4743      continue; 4744 4745    // Add ((Offset - LastOffset) / 64) args of type i64. 4746    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 4747      ArgList.push_back(I64); 4748 4749    // Add double type.
llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
  assert((Offset % MinABIStackAlignInBytes) == 0);

  if ((Align - 1) & Offset)
    return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  return 0;
}

ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  Offset = llvm::RoundUpToAlignment(Offset, Align);
  Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                                 getPaddingType(Align, OrigOffset));
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  if (Ty->isPromotableIntegerType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect(0, 0,
                               IsO32 ? 0 : getPaddingType(Align, OrigOffset));
}

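// Worked example for getPaddingType (illustrative): under N64, a 16-byte
// aligned argument arriving at byte offset 8 gives (Align - 1) & Offset ==
// 15 & 8 != 0, so an i64 padding element is returned; classifyArgumentType
// passes it to ABIArgInfo::getDirect as padding to realign the argument.
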
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  const RecordType *RT = RetTy->getAs<RecordType>();
  SmallVector<llvm::Type*, 8> RTList;

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    unsigned FieldCnt = Layout.getFieldCount();

    // N32/64 returns struct/classes in floating point registers if the
    // following conditions are met:
    // 1. The size of the struct/class is no larger than 128-bit.
    // 2. The struct/class has one or two fields all of which are floating
    //    point types.
    // 3. The offset of the first field is zero (this follows what gcc does).
    //
    // Any other composite results are returned in integer registers.
    //
    if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
      RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
      for (; b != e; ++b) {
        const BuiltinType *BT = b->getType()->getAs<BuiltinType>();

        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}

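// Illustrative cases for the rule above (N32/64):
//
//   struct A { float f; double d; };  // -> { float, double }, FP registers
//   struct B { double d; int i; };    // -> { i64, i64 }, integer registers
//   struct C { double d[3]; };        // 192 bits: never reaches this
//                                     // function; classifyReturnType returns
//                                     // it indirectly (sret) instead.
//
// Note that condition 1 (size <= 128 bits) is enforced by the caller,
// classifyReturnType, not here.
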
ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->isVoidType() || Size == 0)
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (isRecordReturnIndirect(RetTy, CGT))
      return ABIArgInfo::getIndirect(0);

    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers.
      if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));

      if (!IsO32)
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  ABIArgInfo &RetInfo = FI.getReturnInfo();
  RetInfo = classifyReturnType(FI.getReturnType());

  // Check if a pointer to an aggregate is passed as a hidden argument.
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, Offset);
}

llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

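// The align-up sequence above computes (addr + align - 1) & -align. For
// example, under O32 an 8-byte aligned type with ap.cur == 0x1004 yields
// (0x1004 + 7) & ~7 == 0x1008, i.e. the cursor is rounded up to the next
// 8-byte boundary before the value is read.
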
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.

  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                   CodeGen::CodeGenModule &M) const;
};

void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                               llvm::GlobalValue *GV,
                                               CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);

      if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
          M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");

        SmallVector<llvm::Value*, 5> Operands;
        Operands.push_back(F);

        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
          llvm::APInt(32, FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
          llvm::APInt(32, FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
          llvm::APInt(32, FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(llvm::ConstantInt::getTrue(Context));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}

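// For illustration, a kernel declared as
//
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1))) void k();
//
// produces roughly the following module-level metadata (the exact printed
// form depends on the LLVM version):
//
//   !opencl.kernel_wg_size_info = !{!0}
//   !0 = metadata !{void ()* @k, i32 8, i32 4, i32 1, i1 true}
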
//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 29;
  }
};

}

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}

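// Illustrative mapping for small aggregates (sizes include padding):
//
//   struct { char a; };          //  8 bits -> i8
//   struct { short a; };         // 16 bits -> i16
//   struct { char a, b, c; };    // 24 bits -> i32
//   struct { int a; short b; };  // 64 bits (with tail padding) -> i64
//   struct { int a[3]; };        // 96 bits -> indirect, byval
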
5168// 5169// One case requires special care: 5170// 5171// struct mixed { 5172// int i; 5173// float f; 5174// }; 5175// 5176// When a struct mixed is passed by value, it only occupies 8 bytes in the 5177// parameter array, but the int is passed in an integer register, and the float 5178// is passed in a floating point register. This is represented as two arguments 5179// with the LLVM IR inreg attribute: 5180// 5181// declare void f(i32 inreg %i, float inreg %f) 5182// 5183// The code generator will only allocate 4 bytes from the parameter array for 5184// the inreg arguments. All other arguments are allocated a multiple of 8 5185// bytes. 5186// 5187namespace { 5188class SparcV9ABIInfo : public ABIInfo { 5189public: 5190 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 5191 5192private: 5193 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; 5194 virtual void computeInfo(CGFunctionInfo &FI) const; 5195 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5196 CodeGenFunction &CGF) const; 5197 5198 // Coercion type builder for structs passed in registers. The coercion type 5199 // serves two purposes: 5200 // 5201 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' 5202 // in registers. 5203 // 2. Expose aligned floating point elements as first-level elements, so the 5204 // code generator knows to pass them in floating point registers. 5205 // 5206 // We also compute the InReg flag which indicates that the struct contains 5207 // aligned 32-bit floats. 5208 // 5209 struct CoerceBuilder { 5210 llvm::LLVMContext &Context; 5211 const llvm::DataLayout &DL; 5212 SmallVector<llvm::Type*, 8> Elems; 5213 uint64_t Size; 5214 bool InReg; 5215 5216 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 5217 : Context(c), DL(dl), Size(0), InReg(false) {} 5218 5219 // Pad Elems with integers until Size is ToSize. 5220 void pad(uint64_t ToSize) { 5221 assert(ToSize >= Size && "Cannot remove elements"); 5222 if (ToSize == Size) 5223 return; 5224 5225 // Finish the current 64-bit word. 5226 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64); 5227 if (Aligned > Size && Aligned <= ToSize) { 5228 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 5229 Size = Aligned; 5230 } 5231 5232 // Add whole 64-bit words. 5233 while (Size + 64 <= ToSize) { 5234 Elems.push_back(llvm::Type::getInt64Ty(Context)); 5235 Size += 64; 5236 } 5237 5238 // Final in-word padding. 5239 if (Size < ToSize) { 5240 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 5241 Size = ToSize; 5242 } 5243 } 5244 5245 // Add a floating point element at Offset. 5246 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 5247 // Unaligned floats are treated as integers. 5248 if (Offset % Bits) 5249 return; 5250 // The InReg flag is only required if there are any floats < 64 bits. 5251 if (Bits < 64) 5252 InReg = true; 5253 pad(Offset); 5254 Elems.push_back(Ty); 5255 Size = Offset + Bits; 5256 } 5257 5258 // Add a struct type to the coercion type, starting at Offset (in bits). 
//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType Ty, unsigned SizeLimit) const;
  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      if (Ty->getNumElements() != Elems.size())
        return false;
      for (unsigned i = 0, e = Elems.size(); i != e; ++i)
        if (Elems[i] != Ty->getElementType(i))
          return false;
      return true;
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      else
        return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace

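// Worked example (illustrative): running CoerceBuilder over { i32, float },
// the LLVM type for 'struct mixed' from the comment at the top of this
// section:
//
//   - the i32 element hits the default case and is left to the padding logic;
//   - addFloat(32, float, 32) sets InReg, pads with an i32, appends the float.
//
// The result is the coercion type { i32, float } with InReg set, which
// classifyType below turns into getDirectInReg, i.e. the
// 'i32 inreg, float inreg' form shown earlier.
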
ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}

llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  llvm::Type *BPP = CGF.Int8PtrPtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *ArgAddr;
  unsigned Stride;

  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend:
    Stride = 8;
    ArgAddr = Builder
      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                          "extend");
    break;

  case ABIArgInfo::Direct:
    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    ArgAddr = Addr;
    break;

  case ABIArgInfo::Indirect:
    Stride = 8;
    ArgAddr = Builder.CreateBitCast(Addr,
                                    llvm::PointerType::getUnqual(ArgPtrTy),
                                    "indirect");
    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
    break;

  case ABIArgInfo::Ignore:
    return llvm::UndefValue::get(ArgPtrTy);
  }

  // Update VAList.
  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
  Builder.CreateStore(Addr, VAListAddrAsBPP);

  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}

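// Note on the Extend case above: SPARC v9 is big-endian, so a value narrower
// than the 8-byte va_list slot occupies the slot's high-addressed bytes. For
// a 4-byte int, 8 - getTypeAllocSize(i32) == 4, so the "extend" pointer is
// advanced 4 bytes into the slot, to where the int's bytes actually live.
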
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyType(it->type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
};
} // end anonymous namespace

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    {
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      switch (Triple.getOS()) {
      case llvm::Triple::NaCl:
        return *(TheTargetCodeGenInfo =
                 new NaClARMTargetCodeGenInfo(Types, Kind));
      default:
        return *(TheTargetCodeGenInfo =
                 new ARMTargetCodeGenInfo(Types, Kind));
      }
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF())
      return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
    else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::mblaze:
    return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    if (Triple.isOSDarwin())
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, true, true, false,
                                           CodeGenOpts.NumRegisterParameters));

    switch (Triple.getOS()) {
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
    case llvm::Triple::Bitrig:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true, false,
                                           CodeGenOpts.NumRegisterParameters));

    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
               new WinX86_32TargetCodeGenInfo(Types,
                                           CodeGenOpts.NumRegisterParameters));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, false, false,
                                           CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = strcmp(getTarget().getABI(), "avx") == 0;

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
    case llvm::Triple::MinGW32:
    case llvm::Triple::Cygwin:
      return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
                                                                      HasAVX));
    default:
      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
                                                                  HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  }
}