TargetInfo.cpp revision dca8f336e6da2b50eb965535d81d603e39294f9c
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
         e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}
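// Illustrative example (not part of the original source): in C, given
//
//   struct Pad  { int : 0; };            // only an unnamed bit-field
//   struct Wrap { struct Pad p[2]; };    // array of empty records
//
// isEmptyRecord(Context, Wrap, /*AllowArrays=*/true) returns true: 'p' is an
// array of records whose only field is an unnamed bit-field. With
// AllowArrays == false the array is not stripped, so 'p' is not considered
// empty and isEmptyRecord returns false. (In C++ the isa<CXXRecordDecl>
// check above makes record-typed fields non-empty regardless.)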
/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
         e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}
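// Illustrative examples (not part of the original source), assuming C code:
//
//   struct S { int : 0; double d; };   // -> the 'double' type: the unnamed
//                                      //    bit-field is ignored as empty
//   struct A { double d[1]; };         // -> 'double': single-element arrays
//                                      //    are treated as the element
//   struct T { double d; float f; };   // -> 0: two non-empty fields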
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32-
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it
/// was expanded into separate arguments. If so, we prefer to do the latter
/// to avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we
    // were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
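// Illustrative examples (not part of the original source):
//
//   struct P { int a; int b; };   // field sizes sum to 64 bits ==
//                                 // sizeof(P): expandable
//   struct Q { char c; };         // 'char' is not a 32/64-bit basic type:
//                                 // rejected
//   struct R { int a; } __attribute__((aligned(8)));
//                                 // fields sum to 32 bits but sizeof(R) is
//                                 // 64 bits: rejected by the hole check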
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy constructors should not be
    // passed by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &FreeRegs) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
                                              unsigned &FreeRegs) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy constructors should not be
    // passed by value.
    FreeRegs = 0;
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
                         ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  // Regparm regs hold 32 bits.
  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs == 0) return BaseInfo;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return BaseInfo;
  }
  FreeRegs -= SizeInRegs;
  return BaseInfo.isDirect() ?
      ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
      ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
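// Illustrative trace (not part of the original source): for a function
// declared with __attribute__((regparm(2))) taking (long long, int), the
// first argument needs two 32-bit registers (SizeInRegs == 2 <= FreeRegs)
// and is marked DirectInReg, leaving FreeRegs == 0; the second argument then
// fails the SizeInRegs > FreeRegs test and is passed as plain Direct.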
/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32FloatStructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
                               unsigned &FreeRegs) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &FreeRegs,
                                  bool IsFastCall) const;
  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
                      bool IsFastCall, bool &NeedsPadding) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32FloatStructABI(w), DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }
};

}
/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isRecordWithSSEVectorType(Context, i->getType()))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}
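// Illustrative examples (not part of the original source), assuming the
// Darwin x86-32 ABI:
//
//   struct V { __m128 v; };   // type align 16 and contains an SSE vector:
//                             //   byval alignment 16
//   struct D { double d; } __attribute__((aligned(8)));
//                             // align 8 (> 4) but no SSE vector: falls back
//                             //   to MinABIStackAlignInBytes (4)
//   struct I { int i; };      // align 4 <= minimum: returns 0 (use default)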
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            unsigned &FreeRegs) const {
  if (!ByVal) {
    if (FreeRegs) {
      --FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
                                   bool IsFastCall, bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return false;
  }

  FreeRegs -= SizeInRegs;

  if (IsFastCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}
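// Illustrative trace (not part of the original source): under fastcall,
// computeInfo below seeds FreeRegs = 2 (ECX and EDX). For arguments
// (int, int, int) the first two are classified inreg and the third goes on
// the stack. Note that a 64-bit integer argument deducts both registers
// (FreeRegs -= SizeInRegs happens above the Size > 32 check) yet still
// returns false here, so it is passed on the stack.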
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &FreeRegs,
                                               bool IsFastCall) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, false, FreeRegs);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, FreeRegs);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements;
      for (unsigned I = 0; I < SizeInRegs; ++I)
        Elements.push_back(Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);

    return getIndirectResult(Ty, true, FreeRegs);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by
    // passing them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned CC = FI.getCallingConvention();
  bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
  unsigned FreeRegs;
  if (IsFastCall)
    FreeRegs = 2;
  else if (FI.getHasRegParm())
    FreeRegs = FI.getRegParm();
  else
    FreeRegs = DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
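// Worked example (not part of the original source) of the rounding above:
// with Align == 8 and ap.cur == 0x1004,
//   (0x1004 + 7) & -8  ==  0x100B & 0xFFFFFFF8  ==  0x1008,
// i.e. the cursor is bumped to the next 8-byte boundary before the argument
// is loaded, and ap.next then advances by the type size rounded up to 8.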
void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                      llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                              B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a
  /// suitable result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
};

}
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
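// Worked example (not part of the original source) of a malformed pair that
// only postMerge can clean up: for
//   union U { long double ld; unsigned u; };
// the 'long double' member classifies as (Lo = X87, Hi = X87Up) and the
// 'unsigned' member as (Lo = Integer). Merging the members gives
//   Lo = merge(X87, Integer) = Integer,  Hi = merge(X87Up, NoClass) = X87Up,
// and postMerge rule (b) (X87Up not preceded by X87) then forces Lo = Memory
// on platforms honoring revision 0.98, so U is passed in memory.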
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getContext().getTargetInfo().getTriple().getOS() ==
                llvm::Triple::NaCl)) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256 bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to
      // class SSEUP. The original Lo and Hi design considers that types
      // can't be greater than 128 bits, so a 64-bit split in Hi and Lo
      // makes sense. This design isn't correct for 256 bits, but since
      // there are no cases where the upper parts would need to be
      // inspected, avoid adding complexity and just consider Hi to match
      // the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getContext().getTargetInfo().getTriple().getOS() ==
              llvm::Triple::NaCl))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);
    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't
    // extended to work for sizes wider than 128, early check and fallback
    // to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by
    // invisible reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte
        // gets initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class
      // MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the
  // right place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}
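// Illustrative examples (not part of the original source): <2 x i32>
// (64 bits) and, without AVX, <8 x float> (256 bits) are illegal, so
// getIndirectResult below will not treat them as naturally register-passable
// scalars; <4 x float> (128 bits) is always legal.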
This does not seem to happen currently,
1679 // but this code would be much safer if we could mark the argument with
1680 // 'onstack'. See PR12193.
1681 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
1682 // Treat an enum type as its underlying type.
1683 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1684 Ty = EnumTy->getDecl()->getIntegerType();
1685
1686 return (Ty->isPromotableIntegerType() ?
1687 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1688 }
1689
1690 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
1691 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
1692
1693 // Compute the byval alignment. We specify the alignment of the byval in all
1694 // cases so that the mid-level optimizer knows the alignment of the byval.
1695 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
1696
1697 // Attempt to avoid passing indirect results using byval when possible. This
1698 // is important for good codegen.
1699 //
1700 // We do this by coercing the value into a scalar type which the backend can
1701 // handle naturally (i.e., without using byval).
1702 //
1703 // For simplicity, we currently only do this when we have exhausted all of the
1704 // free integer registers. Doing this when there are free integer registers
1705 // would require more care, as we would have to ensure that the coerced value
1706 // did not claim the unused register. That would require either reordering the
1707 // arguments to the function (so that any subsequent inreg values came first),
1708 // or only doing this optimization when there were no following arguments that
1709 // might be inreg.
1710 //
1711 // We currently expect it to be rare (particularly in well written code) for
1712 // arguments to be passed on the stack when there are still free integer
1713 // registers available (this would typically imply large structs being passed
1714 // by value), so this seems like a fair tradeoff for now.
1715 //
1716 // We can revisit this if the backend grows support for 'onstack' parameter
1717 // attributes. See PR12193.
1718 if (freeIntRegs == 0) {
1719 uint64_t Size = getContext().getTypeSize(Ty);
1720
1721 // If this type fits in an eightbyte, coerce it into the matching integral
1722 // type, which will end up on the stack (with alignment 8).
1723 if (Align == 8 && Size <= 64)
1724 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1725 Size));
1726 }
1727
1728 return ABIArgInfo::getIndirect(Align);
1729 }
1730
1731 /// GetByteVectorType - The ABI specifies that a value should be passed in a
1732 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
1733 /// vector register.
1734 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1735 llvm::Type *IRType = CGT.ConvertType(Ty);
1736
1737 // Wrapper structs that just contain vectors are passed just like vectors;
1738 // strip them off if present.
1739 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1740 while (STy && STy->getNumElements() == 1) {
1741 IRType = STy->getElementType(0);
1742 STy = dyn_cast<llvm::StructType>(IRType);
1743 }
1744
1745 // If the preferred type is a 16-byte vector, prefer to pass it.
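// For example, an IR type of <4 x float> or <4 x i32> (128 bits) is
// returned unchanged by the check below, while a 16-byte type this
// routine does not recognize falls back to <2 x double>.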
1746 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
1747 llvm::Type *EltTy = VT->getElementType();
1748 unsigned BitWidth = VT->getBitWidth();
1749 if ((BitWidth >= 128 && BitWidth <= 256) &&
1750 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1751 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1752 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1753 EltTy->isIntegerTy(128)))
1754 return VT;
1755 }
1756
1757 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1758 }
1759
1760 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
1761 /// is known to either be off the end of the specified type or be in
1762 /// alignment padding. The user type specified is known to be at most 128 bits
1763 /// in size, and have passed through X86_64ABIInfo::classify with a successful
1764 /// classification that put one of the two halves in the INTEGER class.
1765 ///
1766 /// It is conservatively correct to return false.
1767 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1768 unsigned EndBit, ASTContext &Context) {
1769 // If the bytes being queried are off the end of the type, there is no user
1770 // data hiding here. This handles analysis of builtins, vectors and other
1771 // types that don't contain interesting padding.
1772 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1773 if (TySize <= StartBit)
1774 return true;
1775
1776 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1777 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1778 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1779
1780 // Check each element to see if the element overlaps with the queried range.
1781 for (unsigned i = 0; i != NumElts; ++i) {
1782 // If the element is after the span we care about, then we're done.
1783 unsigned EltOffset = i*EltSize;
1784 if (EltOffset >= EndBit) break;
1785
1786 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
1787 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1788 EndBit-EltOffset, Context))
1789 return false;
1790 }
1791 // If it overlaps no elements, then it is safe to process as padding.
1792 return true;
1793 }
1794
1795 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1796 const RecordDecl *RD = RT->getDecl();
1797 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1798
1799 // If this is a C++ record, check the bases first.
1800 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1801 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1802 e = CXXRD->bases_end(); i != e; ++i) {
1803 assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1804 "Unexpected base class!");
1805 const CXXRecordDecl *Base =
1806 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1807
1808 // If the base is after the span we care about, ignore it.
1809 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
1810 if (BaseOffset >= EndBit) continue;
1811
1812 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
1813 if (!BitsContainNoUserData(i->getType(), BaseStart,
1814 EndBit-BaseOffset, Context))
1815 return false;
1816 }
1817 }
1818
1819 // Verify that no field has data that overlaps the region of interest. Yes,
1820 // this could be sped up a lot by being smarter about queried fields;
1821 // however, we're only looking at structs up to 16 bytes, so we don't care
1822 // much.
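// For illustration: given struct { double d; float f; }, a query of bits
// [96, 128) recurses into both fields, finds that d and f end by bit 96,
// and reports the trailing four bytes as padding.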
1823 unsigned idx = 0;
1824 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1825 i != e; ++i, ++idx) {
1826 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
1827
1828 // If we found a field after the region we care about, then we're done.
1829 if (FieldOffset >= EndBit) break;
1830
1831 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
1832 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
1833 Context))
1834 return false;
1835 }
1836
1837 // If nothing in this record overlapped the area of interest, then we're
1838 // clean.
1839 return true;
1840 }
1841
1842 return false;
1843 }
1844
1845 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
1846 /// float member at the specified offset. For example, {int,{float}} has a
1847 /// float at offset 4. It is conservatively correct for this routine to return
1848 /// false.
1849 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
1850 const llvm::DataLayout &TD) {
1851 // Base case if we find a float.
1852 if (IROffset == 0 && IRType->isFloatTy())
1853 return true;
1854
1855 // If this is a struct, recurse into the field at the specified offset.
1856 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1857 const llvm::StructLayout *SL = TD.getStructLayout(STy);
1858 unsigned Elt = SL->getElementContainingOffset(IROffset);
1859 IROffset -= SL->getElementOffset(Elt);
1860 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
1861 }
1862
1863 // If this is an array, recurse into the field at the specified offset.
1864 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1865 llvm::Type *EltTy = ATy->getElementType();
1866 unsigned EltSize = TD.getTypeAllocSize(EltTy);
1867 IROffset -= IROffset/EltSize*EltSize;
1868 return ContainsFloatAtOffset(EltTy, IROffset, TD);
1869 }
1870
1871 return false;
1872 }
1873
1874
1875 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
1876 /// low 8 bytes of an XMM register, corresponding to the SSE class.
1877 llvm::Type *X86_64ABIInfo::
1878 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
1879 QualType SourceTy, unsigned SourceOffset) const {
1880 // The only three choices we have are double, <2 x float>, or float. We
1881 // pass as float if the last 4 bytes are just padding. This happens for
1882 // structs that contain 3 floats.
1883 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
1884 SourceOffset*8+64, getContext()))
1885 return llvm::Type::getFloatTy(getVMContext());
1886
1887 // We want to pass as <2 x float> if the LLVM IR type contains a float at
1888 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
1889 // case.
1890 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
1891 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
1892 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
1893
1894 return llvm::Type::getDoubleTy(getVMContext());
1895 }
1896
1897
1898 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
1899 /// an 8-byte GPR. This means that we either have a scalar or we are talking
1900 /// about the high or low part of an up-to-16-byte struct. This routine picks
1901 /// the best LLVM IR type to represent this, which may be i64 or may be anything
1902 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
1903 /// etc).
1904/// 1905/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1906/// the source type. IROffset is an offset in bytes into the LLVM IR type that 1907/// the 8-byte value references. PrefType may be null. 1908/// 1909/// SourceTy is the source level type for the entire argument. SourceOffset is 1910/// an offset into this that we're processing (which is always either 0 or 8). 1911/// 1912llvm::Type *X86_64ABIInfo:: 1913GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1914 QualType SourceTy, unsigned SourceOffset) const { 1915 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1916 // returning an 8-byte unit starting with it. See if we can safely use it. 1917 if (IROffset == 0) { 1918 // Pointers and int64's always fill the 8-byte unit. 1919 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 1920 IRType->isIntegerTy(64)) 1921 return IRType; 1922 1923 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1924 // goodness in the source type is just tail padding. This is allowed to 1925 // kick in for struct {double,int} on the int, but not on 1926 // struct{double,int,int} because we wouldn't return the second int. We 1927 // have to do this analysis on the source type because we can't depend on 1928 // unions being lowered a specific way etc. 1929 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1930 IRType->isIntegerTy(32) || 1931 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 1932 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 1933 cast<llvm::IntegerType>(IRType)->getBitWidth(); 1934 1935 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1936 SourceOffset*8+64, getContext())) 1937 return IRType; 1938 } 1939 } 1940 1941 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1942 // If this is a struct, recurse into the field at the specified offset. 1943 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 1944 if (IROffset < SL->getSizeInBytes()) { 1945 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1946 IROffset -= SL->getElementOffset(FieldIdx); 1947 1948 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1949 SourceTy, SourceOffset); 1950 } 1951 } 1952 1953 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1954 llvm::Type *EltTy = ATy->getElementType(); 1955 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 1956 unsigned EltOffset = IROffset/EltSize*EltSize; 1957 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1958 SourceOffset); 1959 } 1960 1961 // Okay, we don't have any better idea of what to pass, so we pass this in an 1962 // integer register that isn't too big to fit the rest of the struct. 1963 unsigned TySizeInBytes = 1964 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1965 1966 assert(TySizeInBytes != SourceOffset && "Empty field?"); 1967 1968 // It is always safe to classify this as an integer type up to i64 that 1969 // isn't larger than the structure. 1970 return llvm::IntegerType::get(getVMContext(), 1971 std::min(TySizeInBytes-SourceOffset, 8U)*8); 1972} 1973 1974 1975/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 1976/// be used as elements of a two register pair to pass or return, return a 1977/// first class aggregate to represent them. For example, if the low part of 1978/// a by-value argument should be passed as i32* and the high part as float, 1979/// return {i32*, float}. 
1980 static llvm::Type *
1981 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
1982 const llvm::DataLayout &TD) {
1983 // In order to correctly satisfy the ABI, we need the high part to start
1984 // at offset 8. If the high and low parts we inferred are both 4-byte types
1985 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1986 // the second element at offset 8. Check for this:
1987 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1988 unsigned HiAlign = TD.getABITypeAlignment(Hi);
1989 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
1990 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1991
1992 // To handle this, we have to increase the size of the low part so that the
1993 // second element will start at an 8 byte offset. We can't increase the size
1994 // of the second element because it might make us access off the end of the
1995 // struct.
1996 if (HiStart != 8) {
1997 // There are only two sorts of types the ABI generation code can produce for
1998 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
1999 // Promote these to a larger type.
2000 if (Lo->isFloatTy())
2001 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2002 else {
2003 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2004 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2005 }
2006 }
2007
2008 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
2009
2010
2011 // Verify that the second element is at an 8-byte offset.
2012 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2013 "Invalid x86-64 argument pair!");
2014 return Result;
2015 }
2016
2017 ABIArgInfo X86_64ABIInfo::
2018 classifyReturnType(QualType RetTy) const {
2019 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2020 // classification algorithm.
2021 X86_64ABIInfo::Class Lo, Hi;
2022 classify(RetTy, 0, Lo, Hi);
2023
2024 // Check some invariants.
2025 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2026 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2027
2028 llvm::Type *ResType = 0;
2029 switch (Lo) {
2030 case NoClass:
2031 if (Hi == NoClass)
2032 return ABIArgInfo::getIgnore();
2033 // If the low part is just padding, it takes no register, leave ResType
2034 // null.
2035 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2036 "Unknown missing lo part");
2037 break;
2038
2039 case SSEUp:
2040 case X87Up:
2041 llvm_unreachable("Invalid classification for lo word.");
2042
2043 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2044 // hidden argument.
2045 case Memory:
2046 return getIndirectReturnResult(RetTy);
2047
2048 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2049 // available register of the sequence %rax, %rdx is used.
2050 case Integer:
2051 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2052
2053 // If we have a sign or zero extended integer, make sure to return Extend
2054 // so that the parameter gets the right LLVM IR attributes.
2055 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2056 // Treat an enum type as its underlying type.
2057 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2058 RetTy = EnumTy->getDecl()->getIntegerType();
2059
2060 if (RetTy->isIntegralOrEnumerationType() &&
2061 RetTy->isPromotableIntegerType())
2062 return ABIArgInfo::getExtend();
2063 }
2064 break;
2065
2066 // AMD64-ABI 3.2.3p4: Rule 4.
If the class is SSE, the next
2067 // available SSE register of the sequence %xmm0, %xmm1 is used.
2068 case SSE:
2069 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2070 break;
2071
2072 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2073 // returned on the X87 stack in %st0 as 80-bit x87 number.
2074 case X87:
2075 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2076 break;
2077
2078 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2079 // part of the value is returned in %st0 and the imaginary part in
2080 // %st1.
2081 case ComplexX87:
2082 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2083 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2084 llvm::Type::getX86_FP80Ty(getVMContext()),
2085 NULL);
2086 break;
2087 }
2088
2089 llvm::Type *HighPart = 0;
2090 switch (Hi) {
2091 // Memory was handled previously and X87 should
2092 // never occur as a hi class.
2093 case Memory:
2094 case X87:
2095 llvm_unreachable("Invalid classification for hi word.");
2096
2097 case ComplexX87: // Previously handled.
2098 case NoClass:
2099 break;
2100
2101 case Integer:
2102 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2103 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2104 return ABIArgInfo::getDirect(HighPart, 8);
2105 break;
2106 case SSE:
2107 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2108 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2109 return ABIArgInfo::getDirect(HighPart, 8);
2110 break;
2111
2112 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
2113 // is passed in the next available eightbyte chunk of the last used
2114 // vector register.
2115 //
2116 // SSEUP should always be preceded by SSE, just widen.
2117 case SSEUp:
2118 assert(Lo == SSE && "Unexpected SSEUp classification.");
2119 ResType = GetByteVectorType(RetTy);
2120 break;
2121
2122 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2123 // returned together with the previous X87 value in %st0.
2124 case X87Up:
2125 // If X87Up is preceded by X87, we don't need to do
2126 // anything. However, in some cases with unions it may not be
2127 // preceded by X87. In such situations we follow gcc and pass the
2128 // extra bits in an SSE reg.
2129 if (Lo != X87) {
2130 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2131 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2132 return ABIArgInfo::getDirect(HighPart, 8);
2133 }
2134 break;
2135 }
2136
2137 // If a high part was specified, merge it together with the low part. It is
2138 // known to pass in the high eightbyte of the result. We do this by forming a
2139 // first class struct aggregate with the high and low part: {low, high}
2140 if (HighPart)
2141 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2142
2143 return ABIArgInfo::getDirect(ResType);
2144 }
2145
2146 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
2147 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE)
2148 const
2149 {
2150 X86_64ABIInfo::Class Lo, Hi;
2151 classify(Ty, 0, Lo, Hi);
2152
2153 // Check some invariants.
2154 // FIXME: Enforce these by construction.
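// (For example, a 256-bit vector classifies as <SSE, SSEUp>, so a hi
// class of SSEUp can only ever appear above a lo class of SSE.)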
2155 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2156 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2157 2158 neededInt = 0; 2159 neededSSE = 0; 2160 llvm::Type *ResType = 0; 2161 switch (Lo) { 2162 case NoClass: 2163 if (Hi == NoClass) 2164 return ABIArgInfo::getIgnore(); 2165 // If the low part is just padding, it takes no register, leave ResType 2166 // null. 2167 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2168 "Unknown missing lo part"); 2169 break; 2170 2171 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2172 // on the stack. 2173 case Memory: 2174 2175 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2176 // COMPLEX_X87, it is passed in memory. 2177 case X87: 2178 case ComplexX87: 2179 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2180 ++neededInt; 2181 return getIndirectResult(Ty, freeIntRegs); 2182 2183 case SSEUp: 2184 case X87Up: 2185 llvm_unreachable("Invalid classification for lo word."); 2186 2187 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2188 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2189 // and %r9 is used. 2190 case Integer: 2191 ++neededInt; 2192 2193 // Pick an 8-byte type based on the preferred type. 2194 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2195 2196 // If we have a sign or zero extended integer, make sure to return Extend 2197 // so that the parameter gets the right LLVM IR attributes. 2198 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2199 // Treat an enum type as its underlying type. 2200 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2201 Ty = EnumTy->getDecl()->getIntegerType(); 2202 2203 if (Ty->isIntegralOrEnumerationType() && 2204 Ty->isPromotableIntegerType()) 2205 return ABIArgInfo::getExtend(); 2206 } 2207 2208 break; 2209 2210 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2211 // available SSE register is used, the registers are taken in the 2212 // order from %xmm0 to %xmm7. 2213 case SSE: { 2214 llvm::Type *IRType = CGT.ConvertType(Ty); 2215 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2216 ++neededSSE; 2217 break; 2218 } 2219 } 2220 2221 llvm::Type *HighPart = 0; 2222 switch (Hi) { 2223 // Memory was handled previously, ComplexX87 and X87 should 2224 // never occur as hi classes, and X87Up must be preceded by X87, 2225 // which is passed in memory. 2226 case Memory: 2227 case X87: 2228 case ComplexX87: 2229 llvm_unreachable("Invalid classification for hi word."); 2230 2231 case NoClass: break; 2232 2233 case Integer: 2234 ++neededInt; 2235 // Pick an 8-byte type based on the preferred type. 2236 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2237 2238 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2239 return ABIArgInfo::getDirect(HighPart, 8); 2240 break; 2241 2242 // X87Up generally doesn't occur here (long double is passed in 2243 // memory), except in situations involving unions. 2244 case X87Up: 2245 case SSE: 2246 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2247 2248 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2249 return ABIArgInfo::getDirect(HighPart, 8); 2250 2251 ++neededSSE; 2252 break; 2253 2254 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2255 // eightbyte is passed in the upper half of the last used SSE 2256 // register. This only happens when 128-bit vectors are passed. 
2257 case SSEUp: 2258 assert(Lo == SSE && "Unexpected SSEUp classification"); 2259 ResType = GetByteVectorType(Ty); 2260 break; 2261 } 2262 2263 // If a high part was specified, merge it together with the low part. It is 2264 // known to pass in the high eightbyte of the result. We do this by forming a 2265 // first class struct aggregate with the high and low part: {low, high} 2266 if (HighPart) 2267 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2268 2269 return ABIArgInfo::getDirect(ResType); 2270} 2271 2272void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2273 2274 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2275 2276 // Keep track of the number of assigned registers. 2277 unsigned freeIntRegs = 6, freeSSERegs = 8; 2278 2279 // If the return value is indirect, then the hidden argument is consuming one 2280 // integer register. 2281 if (FI.getReturnInfo().isIndirect()) 2282 --freeIntRegs; 2283 2284 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2285 // get assigned (in left-to-right order) for passing as follows... 2286 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2287 it != ie; ++it) { 2288 unsigned neededInt, neededSSE; 2289 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2290 neededSSE); 2291 2292 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2293 // eightbyte of an argument, the whole argument is passed on the 2294 // stack. If registers have already been assigned for some 2295 // eightbytes of such an argument, the assignments get reverted. 2296 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2297 freeIntRegs -= neededInt; 2298 freeSSERegs -= neededSSE; 2299 } else { 2300 it->info = getIndirectResult(it->type, freeIntRegs); 2301 } 2302 } 2303} 2304 2305static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2306 QualType Ty, 2307 CodeGenFunction &CGF) { 2308 llvm::Value *overflow_arg_area_p = 2309 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2310 llvm::Value *overflow_arg_area = 2311 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2312 2313 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2314 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2315 // It isn't stated explicitly in the standard, but in practice we use 2316 // alignment greater than 16 where necessary. 2317 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2318 if (Align > 8) { 2319 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2320 llvm::Value *Offset = 2321 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2322 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2323 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2324 CGF.Int64Ty); 2325 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2326 overflow_arg_area = 2327 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2328 overflow_arg_area->getType(), 2329 "overflow_arg_area.align"); 2330 } 2331 2332 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2333 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2334 llvm::Value *Res = 2335 CGF.Builder.CreateBitCast(overflow_arg_area, 2336 llvm::PointerType::getUnqual(LTy)); 2337 2338 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2339 // l->overflow_arg_area + sizeof(type). 2340 // AMD64-ABI 3.5.7p5: Step 10. 
Align l->overflow_arg_area upwards to
2341 // an 8-byte boundary.
2342
2343 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2344 llvm::Value *Offset =
2345 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
2346 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2347 "overflow_arg_area.next");
2348 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2349
2350 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2351 return Res;
2352 }
2353
2354 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2355 CodeGenFunction &CGF) const {
2356 // Assume that va_list type is correct; should be pointer to LLVM type:
2357 // struct {
2358 // i32 gp_offset;
2359 // i32 fp_offset;
2360 // i8* overflow_arg_area;
2361 // i8* reg_save_area;
2362 // };
2363 unsigned neededInt, neededSSE;
2364
2365 Ty = CGF.getContext().getCanonicalType(Ty);
2366 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);
2367
2368 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2369 // in the registers. If not, go to step 7.
2370 if (!neededInt && !neededSSE)
2371 return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2372
2373 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2374 // general purpose registers needed to pass type and num_fp to hold
2375 // the number of floating point registers needed.
2376
2377 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2378 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2379 // l->fp_offset > 304 - num_fp * 16 go to step 7.
2380 //
2381 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
2382 // register save space.
2383
2384 llvm::Value *InRegs = 0;
2385 llvm::Value *gp_offset_p = 0, *gp_offset = 0;
2386 llvm::Value *fp_offset_p = 0, *fp_offset = 0;
2387 if (neededInt) {
2388 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2389 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2390 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2391 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2392 }
2393
2394 if (neededSSE) {
2395 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2396 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2397 llvm::Value *FitsInFP =
2398 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2399 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2400 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2401 }
2402
2403 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2404 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2405 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2406 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2407
2408 // Emit code to load the value if it was passed in registers.
2409
2410 CGF.EmitBlock(InRegBlock);
2411
2412 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2413 // an offset of l->gp_offset and/or l->fp_offset. This may require
2414 // copying to a temporary location in case the parameter is passed
2415 // in different register classes or requires an alignment greater
2416 // than 8 for general purpose registers and 16 for XMM registers.
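// The register save area spilled in the prologue holds the six GPRs
// %rdi, %rsi, %rdx, %rcx, %r8 and %r9 in 8-byte slots at offsets 0-40,
// followed by %xmm0-%xmm7 in 16-byte slots at offsets 48-160, which is
// where the 176-byte figure above comes from.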
2417 // 2418 // FIXME: This really results in shameful code when we end up needing to 2419 // collect arguments from different places; often what should result in a 2420 // simple assembling of a structure from scattered addresses has many more 2421 // loads than necessary. Can we clean this up? 2422 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2423 llvm::Value *RegAddr = 2424 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2425 "reg_save_area"); 2426 if (neededInt && neededSSE) { 2427 // FIXME: Cleanup. 2428 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2429 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2430 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2431 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2432 llvm::Type *TyLo = ST->getElementType(0); 2433 llvm::Type *TyHi = ST->getElementType(1); 2434 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2435 "Unexpected ABI info for mixed regs"); 2436 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2437 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2438 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2439 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2440 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2441 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2442 llvm::Value *V = 2443 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2444 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2445 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2446 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2447 2448 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2449 llvm::PointerType::getUnqual(LTy)); 2450 } else if (neededInt) { 2451 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2452 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2453 llvm::PointerType::getUnqual(LTy)); 2454 } else if (neededSSE == 1) { 2455 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2456 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2457 llvm::PointerType::getUnqual(LTy)); 2458 } else { 2459 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2460 // SSE registers are spaced 16 bytes apart in the register save 2461 // area, we need to collect the two eightbytes together. 2462 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2463 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2464 llvm::Type *DoubleTy = CGF.DoubleTy; 2465 llvm::Type *DblPtrTy = 2466 llvm::PointerType::getUnqual(DoubleTy); 2467 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2468 DoubleTy, NULL); 2469 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2470 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2471 DblPtrTy)); 2472 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2473 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2474 DblPtrTy)); 2475 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2476 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2477 llvm::PointerType::getUnqual(LTy)); 2478 } 2479 2480 // AMD64-ABI 3.5.7p5: Step 5. Set: 2481 // l->gp_offset = l->gp_offset + num_gp * 8 2482 // l->fp_offset = l->fp_offset + num_fp * 16. 
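// (For instance, an argument classified as <Integer, SSE> advances
// gp_offset by 8 and fp_offset by 16.)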
2483 if (neededInt) { 2484 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2485 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2486 gp_offset_p); 2487 } 2488 if (neededSSE) { 2489 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2490 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2491 fp_offset_p); 2492 } 2493 CGF.EmitBranch(ContBlock); 2494 2495 // Emit code to load the value if it was passed in memory. 2496 2497 CGF.EmitBlock(InMemBlock); 2498 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2499 2500 // Return the appropriate result. 2501 2502 CGF.EmitBlock(ContBlock); 2503 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2504 "vaarg.addr"); 2505 ResAddr->addIncoming(RegAddr, InRegBlock); 2506 ResAddr->addIncoming(MemAddr, InMemBlock); 2507 return ResAddr; 2508} 2509 2510ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2511 2512 if (Ty->isVoidType()) 2513 return ABIArgInfo::getIgnore(); 2514 2515 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2516 Ty = EnumTy->getDecl()->getIntegerType(); 2517 2518 uint64_t Size = getContext().getTypeSize(Ty); 2519 2520 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2521 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2522 RT->getDecl()->hasFlexibleArrayMember()) 2523 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2524 2525 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2526 if (Size == 128 && 2527 getContext().getTargetInfo().getTriple().getOS() 2528 == llvm::Triple::MinGW32) 2529 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2530 Size)); 2531 2532 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2533 // not 1, 2, 4, or 8 bytes, must be passed by reference." 
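// That is, pass directly only when the size is a power of two of at
// most 64 bits; e.g. a 3-byte (24-bit) struct fails the power-of-two
// test below and is passed indirectly.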
2534 if (Size <= 64 &&
2535 (Size & (Size - 1)) == 0)
2536 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2537 Size));
2538
2539 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2540 }
2541
2542 if (Ty->isPromotableIntegerType())
2543 return ABIArgInfo::getExtend();
2544
2545 return ABIArgInfo::getDirect();
2546 }
2547
2548 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2549
2550 QualType RetTy = FI.getReturnType();
2551 FI.getReturnInfo() = classify(RetTy);
2552
2553 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2554 it != ie; ++it)
2555 it->info = classify(it->type);
2556 }
2557
2558 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2559 CodeGenFunction &CGF) const {
2560 llvm::Type *BPP = CGF.Int8PtrPtrTy;
2561
2562 CGBuilderTy &Builder = CGF.Builder;
2563 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2564 "ap");
2565 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2566 llvm::Type *PTy =
2567 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2568 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2569
2570 uint64_t Offset =
2571 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2572 llvm::Value *NextAddr =
2573 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2574 "ap.next");
2575 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2576
2577 return AddrTyped;
2578 }
2579
2580 namespace {
2581
2582 class NaClX86_64ABIInfo : public ABIInfo {
2583 public:
2584 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2585 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
2586 virtual void computeInfo(CGFunctionInfo &FI) const;
2587 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2588 CodeGenFunction &CGF) const;
2589 private:
2590 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
2591 X86_64ABIInfo NInfo; // Used for everything else.
2592 };
2593
2594 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2595 public:
2596 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2597 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2598 };
2599
2600 }
2601
2602 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2603 if (FI.getASTCallingConvention() == CC_PnaclCall)
2604 PInfo.computeInfo(FI);
2605 else
2606 NInfo.computeInfo(FI);
2607 }
2608
2609 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2610 CodeGenFunction &CGF) const {
2611 // Always use the native convention; calling pnacl-style varargs functions
2612 // is unsupported.
2613 return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2614 }
2615
2616
2617 // PowerPC-32
2618
2619 namespace {
2620 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2621 public:
2622 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2623
2624 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2625 // This is recovered from gcc output.
2626 return 1; // r1 is the dedicated stack pointer
2627 }
2628
2629 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2630 llvm::Value *Address) const;
2631 };
2632
2633 }
2634
2635 bool
2636 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2637 llvm::Value *Address) const {
2638 // This is calculated from the LLVM and GCC tables and verified
2639 // against gcc output. AFAIK all ABIs use the same encoding.
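// The table is indexed by DWARF register number, and each entry holds
// that register's size in bytes; e.g. entries 0-31 below are set to 4
// for the 4-byte general-purpose registers.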
2640 2641 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2642 2643 llvm::IntegerType *i8 = CGF.Int8Ty; 2644 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2645 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2646 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2647 2648 // 0-31: r0-31, the 4-byte general-purpose registers 2649 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2650 2651 // 32-63: fp0-31, the 8-byte floating-point registers 2652 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2653 2654 // 64-76 are various 4-byte special-purpose registers: 2655 // 64: mq 2656 // 65: lr 2657 // 66: ctr 2658 // 67: ap 2659 // 68-75 cr0-7 2660 // 76: xer 2661 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2662 2663 // 77-108: v0-31, the 16-byte vector registers 2664 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2665 2666 // 109: vrsave 2667 // 110: vscr 2668 // 111: spe_acc 2669 // 112: spefscr 2670 // 113: sfp 2671 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2672 2673 return false; 2674} 2675 2676// PowerPC-64 2677 2678namespace { 2679/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 2680class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 2681 2682public: 2683 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 2684 2685 bool isPromotableTypeForABI(QualType Ty) const; 2686 2687 ABIArgInfo classifyReturnType(QualType RetTy) const; 2688 ABIArgInfo classifyArgumentType(QualType Ty) const; 2689 2690 // TODO: We can add more logic to computeInfo to improve performance. 2691 // Example: For aggregate arguments that fit in a register, we could 2692 // use getDirectInReg (as is done below for structs containing a single 2693 // floating-point value) to avoid pushing them to memory on function 2694 // entry. This would require changing the logic in PPCISelLowering 2695 // when lowering the parameters in the caller and args in the callee. 2696 virtual void computeInfo(CGFunctionInfo &FI) const { 2697 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2698 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2699 it != ie; ++it) { 2700 // We rely on the default argument classification for the most part. 2701 // One exception: An aggregate containing a single floating-point 2702 // item must be passed in a register if one is available. 2703 const Type *T = isSingleElementStruct(it->type, getContext()); 2704 if (T) { 2705 const BuiltinType *BT = T->getAs<BuiltinType>(); 2706 if (BT && BT->isFloatingPoint()) { 2707 QualType QT(T, 0); 2708 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 2709 continue; 2710 } 2711 } 2712 it->info = classifyArgumentType(it->type); 2713 } 2714 } 2715 2716 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 2717 QualType Ty, 2718 CodeGenFunction &CGF) const; 2719}; 2720 2721class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 2722public: 2723 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT) 2724 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {} 2725 2726 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2727 // This is recovered from gcc output. 
2728 return 1; // r1 is the dedicated stack pointer 2729 } 2730 2731 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2732 llvm::Value *Address) const; 2733}; 2734 2735class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2736public: 2737 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2738 2739 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2740 // This is recovered from gcc output. 2741 return 1; // r1 is the dedicated stack pointer 2742 } 2743 2744 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2745 llvm::Value *Address) const; 2746}; 2747 2748} 2749 2750// Return true if the ABI requires Ty to be passed sign- or zero- 2751// extended to 64 bits. 2752bool 2753PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 2754 // Treat an enum type as its underlying type. 2755 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2756 Ty = EnumTy->getDecl()->getIntegerType(); 2757 2758 // Promotable integer types are required to be promoted by the ABI. 2759 if (Ty->isPromotableIntegerType()) 2760 return true; 2761 2762 // In addition to the usual promotable integer types, we also need to 2763 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 2764 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 2765 switch (BT->getKind()) { 2766 case BuiltinType::Int: 2767 case BuiltinType::UInt: 2768 return true; 2769 default: 2770 break; 2771 } 2772 2773 return false; 2774} 2775 2776ABIArgInfo 2777PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 2778 if (Ty->isAnyComplexType()) 2779 return ABIArgInfo::getDirect(); 2780 2781 if (isAggregateTypeForABI(Ty)) { 2782 // Records with non trivial destructors/constructors should not be passed 2783 // by value. 2784 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2785 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2786 2787 return ABIArgInfo::getIndirect(0); 2788 } 2789 2790 return (isPromotableTypeForABI(Ty) ? 2791 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2792} 2793 2794ABIArgInfo 2795PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 2796 if (RetTy->isVoidType()) 2797 return ABIArgInfo::getIgnore(); 2798 2799 if (RetTy->isAnyComplexType()) 2800 return ABIArgInfo::getDirect(); 2801 2802 if (isAggregateTypeForABI(RetTy)) 2803 return ABIArgInfo::getIndirect(0); 2804 2805 return (isPromotableTypeForABI(RetTy) ? 2806 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2807} 2808 2809// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 2810llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 2811 QualType Ty, 2812 CodeGenFunction &CGF) const { 2813 llvm::Type *BP = CGF.Int8PtrTy; 2814 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2815 2816 CGBuilderTy &Builder = CGF.Builder; 2817 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2818 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2819 2820 // Update the va_list pointer. The pointer should be bumped by the 2821 // size of the object. We can trust getTypeSize() except for a complex 2822 // type whose base type is smaller than a doubleword. For these, the 2823 // size of the object is 16 bytes; see below for further explanation. 
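// For example, a _Complex float is 8 bytes by getTypeSize(), but its
// real and imaginary parts are each right-justified in their own
// doubleword, so the va_list pointer must still advance by 16 bytes.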
2824 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 2825 QualType BaseTy; 2826 unsigned CplxBaseSize = 0; 2827 2828 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 2829 BaseTy = CTy->getElementType(); 2830 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8; 2831 if (CplxBaseSize < 8) 2832 SizeInBytes = 16; 2833 } 2834 2835 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 2836 llvm::Value *NextAddr = 2837 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 2838 "ap.next"); 2839 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2840 2841 // If we have a complex type and the base type is smaller than 8 bytes, 2842 // the ABI calls for the real and imaginary parts to be right-adjusted 2843 // in separate doublewords. However, Clang expects us to produce a 2844 // pointer to a structure with the two parts packed tightly. So generate 2845 // loads of the real and imaginary parts relative to the va_list pointer, 2846 // and store them to a temporary structure. 2847 if (CplxBaseSize && CplxBaseSize < 8) { 2848 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2849 llvm::Value *ImagAddr = RealAddr; 2850 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize)); 2851 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize)); 2852 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy)); 2853 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy); 2854 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy); 2855 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal"); 2856 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag"); 2857 llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), 2858 "vacplx"); 2859 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real"); 2860 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag"); 2861 Builder.CreateStore(Real, RealPtr, false); 2862 Builder.CreateStore(Imag, ImagPtr, false); 2863 return Ptr; 2864 } 2865 2866 // If the argument is smaller than 8 bytes, it is right-adjusted in 2867 // its doubleword slot. Adjust the pointer to pick it up from the 2868 // correct offset. 2869 if (SizeInBytes < 8) { 2870 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2871 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 2872 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2873 } 2874 2875 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2876 return Builder.CreateBitCast(Addr, PTy); 2877} 2878 2879static bool 2880PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2881 llvm::Value *Address) { 2882 // This is calculated from the LLVM and GCC tables and verified 2883 // against gcc output. AFAIK all ABIs use the same encoding. 
2884 2885 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2886 2887 llvm::IntegerType *i8 = CGF.Int8Ty; 2888 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2889 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2890 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2891 2892 // 0-31: r0-31, the 8-byte general-purpose registers 2893 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2894 2895 // 32-63: fp0-31, the 8-byte floating-point registers 2896 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2897 2898 // 64-76 are various 4-byte special-purpose registers: 2899 // 64: mq 2900 // 65: lr 2901 // 66: ctr 2902 // 67: ap 2903 // 68-75 cr0-7 2904 // 76: xer 2905 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2906 2907 // 77-108: v0-31, the 16-byte vector registers 2908 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2909 2910 // 109: vrsave 2911 // 110: vscr 2912 // 111: spe_acc 2913 // 112: spefscr 2914 // 113: sfp 2915 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2916 2917 return false; 2918} 2919 2920bool 2921PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 2922 CodeGen::CodeGenFunction &CGF, 2923 llvm::Value *Address) const { 2924 2925 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2926} 2927 2928bool 2929PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2930 llvm::Value *Address) const { 2931 2932 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2933} 2934 2935//===----------------------------------------------------------------------===// 2936// ARM ABI Implementation 2937//===----------------------------------------------------------------------===// 2938 2939namespace { 2940 2941class ARMABIInfo : public ABIInfo { 2942public: 2943 enum ABIKind { 2944 APCS = 0, 2945 AAPCS = 1, 2946 AAPCS_VFP 2947 }; 2948 2949private: 2950 ABIKind Kind; 2951 2952public: 2953 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) { 2954 setRuntimeCC(); 2955 } 2956 2957 bool isEABI() const { 2958 StringRef Env = 2959 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2960 return (Env == "gnueabi" || Env == "eabi" || 2961 Env == "android" || Env == "androideabi"); 2962 } 2963 2964private: 2965 ABIKind getABIKind() const { return Kind; } 2966 2967 ABIArgInfo classifyReturnType(QualType RetTy) const; 2968 ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs, 2969 unsigned &AllocatedVFP, 2970 bool &IsHA) const; 2971 bool isIllegalVectorType(QualType Ty) const; 2972 2973 virtual void computeInfo(CGFunctionInfo &FI) const; 2974 2975 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2976 CodeGenFunction &CGF) const; 2977 2978 llvm::CallingConv::ID getLLVMDefaultCC() const; 2979 llvm::CallingConv::ID getABIDefaultCC() const; 2980 void setRuntimeCC(); 2981}; 2982 2983class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2984public: 2985 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2986 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2987 2988 const ARMABIInfo &getABIInfo() const { 2989 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2990 } 2991 2992 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2993 return 13; 2994 } 2995 2996 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2997 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2998 } 2999 3000 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3001 llvm::Value *Address) const { 3002 llvm::Value *Four8 = 
llvm::ConstantInt::get(CGF.Int8Ty, 4); 3003 3004 // 0-15 are the 16 integer registers. 3005 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 3006 return false; 3007 } 3008 3009 unsigned getSizeOfUnwindException() const { 3010 if (getABIInfo().isEABI()) return 88; 3011 return TargetCodeGenInfo::getSizeOfUnwindException(); 3012 } 3013}; 3014 3015} 3016 3017void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3018 // To correctly handle Homogeneous Aggregate, we need to keep track of the 3019 // VFP registers allocated so far. 3020 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 3021 // VFP registers of the appropriate type unallocated then the argument is 3022 // allocated to the lowest-numbered sequence of such registers. 3023 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 3024 // unallocated are marked as unavailable. 3025 unsigned AllocatedVFP = 0; 3026 int VFPRegs[16] = { 0 }; 3027 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3028 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3029 it != ie; ++it) { 3030 unsigned PreAllocation = AllocatedVFP; 3031 bool IsHA = false; 3032 // 6.1.2.3 There is one VFP co-processor register class using registers 3033 // s0-s15 (d0-d7) for passing arguments. 3034 const unsigned NumVFPs = 16; 3035 it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA); 3036 // If we do not have enough VFP registers for the HA, any VFP registers 3037 // that are unallocated are marked as unavailable. To achieve this, we add 3038 // padding of (NumVFPs - PreAllocation) floats. 3039 if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) { 3040 llvm::Type *PaddingTy = llvm::ArrayType::get( 3041 llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation); 3042 it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy); 3043 } 3044 } 3045 3046 // Always honor user-specified calling convention. 3047 if (FI.getCallingConvention() != llvm::CallingConv::C) 3048 return; 3049 3050 llvm::CallingConv::ID cc = getRuntimeCC(); 3051 if (cc != llvm::CallingConv::C) 3052 FI.setEffectiveCallingConvention(cc); 3053} 3054 3055/// Return the default calling convention that LLVM will use. 3056llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 3057 // The default calling convention that LLVM will infer. 3058 if (getContext().getTargetInfo().getTriple().getEnvironmentName()=="gnueabihf") 3059 return llvm::CallingConv::ARM_AAPCS_VFP; 3060 else if (isEABI()) 3061 return llvm::CallingConv::ARM_AAPCS; 3062 else 3063 return llvm::CallingConv::ARM_APCS; 3064} 3065 3066/// Return the calling convention that our ABI would like us to use 3067/// as the C calling convention. 3068llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 3069 switch (getABIKind()) { 3070 case APCS: return llvm::CallingConv::ARM_APCS; 3071 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 3072 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 3073 } 3074 llvm_unreachable("bad ABI kind"); 3075} 3076 3077void ARMABIInfo::setRuntimeCC() { 3078 assert(getRuntimeCC() == llvm::CallingConv::C); 3079 3080 // Don't muddy up the IR with a ton of explicit annotations if 3081 // they'd just match what LLVM will infer from the triple. 3082 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 3083 if (abiCC != getLLVMDefaultCC()) 3084 RuntimeCC = abiCC; 3085} 3086 3087/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 3088/// aggregate. 
If HAMembers is non-null, the number of base elements 3089/// contained in the type is returned through it; this is used for the 3090/// recursive calls that check aggregate component types. 3091static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 3092 ASTContext &Context, 3093 uint64_t *HAMembers = 0) { 3094 uint64_t Members = 0; 3095 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 3096 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 3097 return false; 3098 Members *= AT->getSize().getZExtValue(); 3099 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 3100 const RecordDecl *RD = RT->getDecl(); 3101 if (RD->hasFlexibleArrayMember()) 3102 return false; 3103 3104 Members = 0; 3105 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3106 i != e; ++i) { 3107 const FieldDecl *FD = *i; 3108 uint64_t FldMembers; 3109 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 3110 return false; 3111 3112 Members = (RD->isUnion() ? 3113 std::max(Members, FldMembers) : Members + FldMembers); 3114 } 3115 } else { 3116 Members = 1; 3117 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 3118 Members = 2; 3119 Ty = CT->getElementType(); 3120 } 3121 3122 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 3123 // double, or 64-bit or 128-bit vectors. 3124 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3125 if (BT->getKind() != BuiltinType::Float && 3126 BT->getKind() != BuiltinType::Double && 3127 BT->getKind() != BuiltinType::LongDouble) 3128 return false; 3129 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 3130 unsigned VecSize = Context.getTypeSize(VT); 3131 if (VecSize != 64 && VecSize != 128) 3132 return false; 3133 } else { 3134 return false; 3135 } 3136 3137 // The base type must be the same for all members. Vector types of the 3138 // same total size are treated as being equivalent here. 3139 const Type *TyPtr = Ty.getTypePtr(); 3140 if (!Base) 3141 Base = TyPtr; 3142 if (Base != TyPtr && 3143 (!Base->isVectorType() || !TyPtr->isVectorType() || 3144 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 3145 return false; 3146 } 3147 3148 // Homogeneous Aggregates can have at most 4 members of the base type. 3149 if (HAMembers) 3150 *HAMembers = Members; 3151 3152 return (Members > 0 && Members <= 4); 3153} 3154 3155/// markAllocatedVFPs - update VFPRegs according to the alignment and 3156/// number of VFP registers (unit is S register) requested. 3157static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP, 3158 unsigned Alignment, 3159 unsigned NumRequired) { 3160 // Early Exit. 3161 if (AllocatedVFP >= 16) 3162 return; 3163 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 3164 // VFP registers of the appropriate type unallocated then the argument is 3165 // allocated to the lowest-numbered sequence of such registers. 3166 for (unsigned I = 0; I < 16; I += Alignment) { 3167 bool FoundSlot = true; 3168 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 3169 if (J >= 16 || VFPRegs[J]) { 3170 FoundSlot = false; 3171 break; 3172 } 3173 if (FoundSlot) { 3174 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 3175 VFPRegs[J] = 1; 3176 AllocatedVFP += NumRequired; 3177 return; 3178 } 3179 } 3180 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 3181 // unallocated are marked as unavailable. 
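// For example, a double needs two consecutive S registers starting at
// an even index (d0 = {s0,s1}, d1 = {s2,s3}, ...); if no such aligned
// run is free, all remaining S registers are marked used below.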
3182 for (unsigned I = 0; I < 16; I++) 3183 VFPRegs[I] = 1; 3184 AllocatedVFP = 17; // We do not have enough VFP registers. 3185} 3186 3187ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs, 3188 unsigned &AllocatedVFP, 3189 bool &IsHA) const { 3190 // We update number of allocated VFPs according to 3191 // 6.1.2.1 The following argument types are VFP CPRCs: 3192 // A single-precision floating-point type (including promoted 3193 // half-precision types); A double-precision floating-point type; 3194 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 3195 // with a Base Type of a single- or double-precision floating-point type, 3196 // 64-bit containerized vectors or 128-bit containerized vectors with one 3197 // to four Elements. 3198 3199 // Handle illegal vector types here. 3200 if (isIllegalVectorType(Ty)) { 3201 uint64_t Size = getContext().getTypeSize(Ty); 3202 if (Size <= 32) { 3203 llvm::Type *ResType = 3204 llvm::Type::getInt32Ty(getVMContext()); 3205 return ABIArgInfo::getDirect(ResType); 3206 } 3207 if (Size == 64) { 3208 llvm::Type *ResType = llvm::VectorType::get( 3209 llvm::Type::getInt32Ty(getVMContext()), 2); 3210 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2); 3211 return ABIArgInfo::getDirect(ResType); 3212 } 3213 if (Size == 128) { 3214 llvm::Type *ResType = llvm::VectorType::get( 3215 llvm::Type::getInt32Ty(getVMContext()), 4); 3216 markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4); 3217 return ABIArgInfo::getDirect(ResType); 3218 } 3219 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3220 } 3221 // Update VFPRegs for legal vector types. 3222 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3223 uint64_t Size = getContext().getTypeSize(VT); 3224 // Size of a legal vector should be power of 2 and above 64. 3225 markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32); 3226 } 3227 // Update VFPRegs for floating point types. 3228 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3229 if (BT->getKind() == BuiltinType::Half || 3230 BT->getKind() == BuiltinType::Float) 3231 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1); 3232 if (BT->getKind() == BuiltinType::Double || 3233 BT->getKind() == BuiltinType::LongDouble) 3234 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2); 3235 } 3236 3237 if (!isAggregateTypeForABI(Ty)) { 3238 // Treat an enum type as its underlying type. 3239 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3240 Ty = EnumTy->getDecl()->getIntegerType(); 3241 3242 return (Ty->isPromotableIntegerType() ? 3243 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3244 } 3245 3246 // Ignore empty records. 3247 if (isEmptyRecord(getContext(), Ty, true)) 3248 return ABIArgInfo::getIgnore(); 3249 3250 // Structures with either a non-trivial destructor or a non-trivial 3251 // copy constructor are always indirect. 3252 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3253 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3254 3255 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 3256 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 3257 // into VFP registers. 3258 const Type *Base = 0; 3259 uint64_t Members = 0; 3260 if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) { 3261 assert(Base && "Base class should be set for homogeneous aggregate"); 3262 // Base can be a floating-point or a vector. 3263 if (Base->isVectorType()) { 3264 // ElementSize is in number of floats. 3265 unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 
2 : 4; 3266 markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize, 3267 Members * ElementSize); 3268 } else if (Base->isSpecificBuiltinType(BuiltinType::Float)) 3269 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members); 3270 else { 3271 assert(Base->isSpecificBuiltinType(BuiltinType::Double) || 3272 Base->isSpecificBuiltinType(BuiltinType::LongDouble)); 3273 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2); 3274 } 3275 IsHA = true; 3276 return ABIArgInfo::getExpand(); 3277 } 3278 } 3279 3280 // Support byval for ARM. 3281 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 3282 // most 8-byte. We realign the indirect argument if type alignment is bigger 3283 // than ABI alignment. 3284 uint64_t ABIAlign = 4; 3285 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 3286 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3287 getABIKind() == ARMABIInfo::AAPCS) 3288 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3289 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 3290 return ABIArgInfo::getIndirect(0, /*ByVal=*/true, 3291 /*Realign=*/TyAlign > ABIAlign); 3292 } 3293 3294 // Otherwise, pass by coercing to a structure of the appropriate size. 3295 llvm::Type* ElemTy; 3296 unsigned SizeRegs; 3297 // FIXME: Try to match the types of the arguments more accurately where 3298 // we can. 3299 if (getContext().getTypeAlign(Ty) <= 32) { 3300 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 3301 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 3302 } else { 3303 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 3304 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 3305 } 3306 3307 llvm::Type *STy = 3308 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 3309 return ABIArgInfo::getDirect(STy); 3310} 3311 3312static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 3313 llvm::LLVMContext &VMContext) { 3314 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 3315 // is called integer-like if its size is less than or equal to one word, and 3316 // the offset of each of its addressable sub-fields is zero. 3317 3318 uint64_t Size = Context.getTypeSize(Ty); 3319 3320 // Check that the type fits in a word. 3321 if (Size > 32) 3322 return false; 3323 3324 // FIXME: Handle vector types! 3325 if (Ty->isVectorType()) 3326 return false; 3327 3328 // Float types are never treated as "integer like". 3329 if (Ty->isRealFloatingType()) 3330 return false; 3331 3332 // If this is a builtin or pointer type then it is ok. 3333 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 3334 return true; 3335 3336 // Small complex integer types are "integer like". 3337 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 3338 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 3339 3340 // Single element and zero sized arrays should be allowed, by the definition 3341 // above, but they are not. 3342 3343 // Otherwise, it must be a record type. 3344 const RecordType *RT = Ty->getAs<RecordType>(); 3345 if (!RT) return false; 3346 3347 // Ignore records with flexible arrays. 3348 const RecordDecl *RD = RT->getDecl(); 3349 if (RD->hasFlexibleArrayMember()) 3350 return false; 3351 3352 // Check that all sub-fields are at offset 0, and are themselves "integer 3353 // like". 
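  // A few concrete cases implied by the rules above (illustration only,
  // not from the APCS document):
  //   struct A { int x; };          // integer-like: one word, field at 0
  //   union  B { int i; char c; };  // integer-like: every member at offset 0
  //   struct C { short a, b; };     // not integer-like: 'b' at offset 16
  //   struct D { float f; };        // not integer-like: floating-point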
3354 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3355 3356 bool HadField = false; 3357 unsigned idx = 0; 3358 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3359 i != e; ++i, ++idx) { 3360 const FieldDecl *FD = *i; 3361 3362 // Bit-fields are not addressable, we only need to verify they are "integer 3363 // like". We still have to disallow a subsequent non-bitfield, for example: 3364 // struct { int : 0; int x } 3365 // is non-integer like according to gcc. 3366 if (FD->isBitField()) { 3367 if (!RD->isUnion()) 3368 HadField = true; 3369 3370 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3371 return false; 3372 3373 continue; 3374 } 3375 3376 // Check if this field is at offset 0. 3377 if (Layout.getFieldOffset(idx) != 0) 3378 return false; 3379 3380 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3381 return false; 3382 3383 // Only allow at most one field in a structure. This doesn't match the 3384 // wording above, but follows gcc in situations with a field following an 3385 // empty structure. 3386 if (!RD->isUnion()) { 3387 if (HadField) 3388 return false; 3389 3390 HadField = true; 3391 } 3392 } 3393 3394 return true; 3395} 3396 3397ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 3398 if (RetTy->isVoidType()) 3399 return ABIArgInfo::getIgnore(); 3400 3401 // Large vector types should be returned via memory. 3402 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3403 return ABIArgInfo::getIndirect(0); 3404 3405 if (!isAggregateTypeForABI(RetTy)) { 3406 // Treat an enum type as its underlying type. 3407 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3408 RetTy = EnumTy->getDecl()->getIntegerType(); 3409 3410 return (RetTy->isPromotableIntegerType() ? 3411 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3412 } 3413 3414 // Structures with either a non-trivial destructor or a non-trivial 3415 // copy constructor are always indirect. 3416 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3417 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3418 3419 // Are we following APCS? 3420 if (getABIKind() == APCS) { 3421 if (isEmptyRecord(getContext(), RetTy, false)) 3422 return ABIArgInfo::getIgnore(); 3423 3424 // Complex types are all returned as packed integers. 3425 // 3426 // FIXME: Consider using 2 x vector types if the back end handles them 3427 // correctly. 3428 if (RetTy->isAnyComplexType()) 3429 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3430 getContext().getTypeSize(RetTy))); 3431 3432 // Integer like structures are returned in r0. 3433 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 3434 // Return in the smallest viable integer type. 3435 uint64_t Size = getContext().getTypeSize(RetTy); 3436 if (Size <= 8) 3437 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3438 if (Size <= 16) 3439 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3440 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3441 } 3442 3443 // Otherwise return in memory. 3444 return ABIArgInfo::getIndirect(0); 3445 } 3446 3447 // Otherwise this is an AAPCS variant. 3448 3449 if (isEmptyRecord(getContext(), RetTy, true)) 3450 return ABIArgInfo::getIgnore(); 3451 3452 // Check for homogeneous aggregates with AAPCS-VFP. 
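  // For example (sketch): "struct Quad { float x, y, z, w; }" is a
  // homogeneous aggregate of four floats and is returned directly (the
  // backend can then use s0-s3), while "struct Five { float v[5]; }"
  // exceeds the four-member limit and falls through to the size-based
  // rules below.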
3453 if (getABIKind() == AAPCS_VFP) { 3454 const Type *Base = 0; 3455 if (isHomogeneousAggregate(RetTy, Base, getContext())) { 3456 assert(Base && "Base class should be set for homogeneous aggregate"); 3457 // Homogeneous Aggregates are returned directly. 3458 return ABIArgInfo::getDirect(); 3459 } 3460 } 3461 3462 // Aggregates <= 4 bytes are returned in r0; other aggregates 3463 // are returned indirectly. 3464 uint64_t Size = getContext().getTypeSize(RetTy); 3465 if (Size <= 32) { 3466 // Return in the smallest viable integer type. 3467 if (Size <= 8) 3468 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3469 if (Size <= 16) 3470 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3471 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3472 } 3473 3474 return ABIArgInfo::getIndirect(0); 3475} 3476 3477/// isIllegalVector - check whether Ty is an illegal vector type. 3478bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 3479 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3480 // Check whether VT is legal. 3481 unsigned NumElements = VT->getNumElements(); 3482 uint64_t Size = getContext().getTypeSize(VT); 3483 // NumElements should be power of 2. 3484 if ((NumElements & (NumElements - 1)) != 0) 3485 return true; 3486 // Size should be greater than 32 bits. 3487 return Size <= 32; 3488 } 3489 return false; 3490} 3491 3492llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3493 CodeGenFunction &CGF) const { 3494 llvm::Type *BP = CGF.Int8PtrTy; 3495 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3496 3497 CGBuilderTy &Builder = CGF.Builder; 3498 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3499 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3500 3501 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 3502 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 3503 bool IsIndirect = false; 3504 3505 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 3506 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 3507 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3508 getABIKind() == ARMABIInfo::AAPCS) 3509 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3510 else 3511 TyAlign = 4; 3512 // Use indirect if size of the illegal vector is bigger than 16 bytes. 3513 if (isIllegalVectorType(Ty) && Size > 16) { 3514 IsIndirect = true; 3515 Size = 4; 3516 TyAlign = 4; 3517 } 3518 3519 // Handle address alignment for ABI alignment > 4 bytes. 3520 if (TyAlign > 4) { 3521 assert((TyAlign & (TyAlign - 1)) == 0 && 3522 "Alignment is not power of 2!"); 3523 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 3524 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 3525 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 3526 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3527 } 3528 3529 uint64_t Offset = 3530 llvm::RoundUpToAlignment(Size, 4); 3531 llvm::Value *NextAddr = 3532 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3533 "ap.next"); 3534 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3535 3536 if (IsIndirect) 3537 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 3538 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { 3539 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur 3540 // may not be correctly aligned for the vector type. 
We create an aligned 3541 // temporary space and copy the content over from ap.cur to the temporary 3542 // space. This is necessary if the natural alignment of the type is greater 3543 // than the ABI alignment. 3544 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 3545 CharUnits CharSize = getContext().getTypeSizeInChars(Ty); 3546 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty), 3547 "var.align"); 3548 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 3549 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy); 3550 Builder.CreateMemCpy(Dst, Src, 3551 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()), 3552 TyAlign, false); 3553 Addr = AlignedTemp; //The content is in aligned location. 3554 } 3555 llvm::Type *PTy = 3556 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3557 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3558 3559 return AddrTyped; 3560} 3561 3562namespace { 3563 3564class NaClARMABIInfo : public ABIInfo { 3565 public: 3566 NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3567 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {} 3568 virtual void computeInfo(CGFunctionInfo &FI) const; 3569 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3570 CodeGenFunction &CGF) const; 3571 private: 3572 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 3573 ARMABIInfo NInfo; // Used for everything else. 3574}; 3575 3576class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo { 3577 public: 3578 NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3579 : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {} 3580}; 3581 3582} 3583 3584void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3585 if (FI.getASTCallingConvention() == CC_PnaclCall) 3586 PInfo.computeInfo(FI); 3587 else 3588 static_cast<const ABIInfo&>(NInfo).computeInfo(FI); 3589} 3590 3591llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3592 CodeGenFunction &CGF) const { 3593 // Always use the native convention; calling pnacl-style varargs functions 3594 // is unsupported. 3595 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF); 3596} 3597 3598//===----------------------------------------------------------------------===// 3599// AArch64 ABI Implementation 3600//===----------------------------------------------------------------------===// 3601 3602namespace { 3603 3604class AArch64ABIInfo : public ABIInfo { 3605public: 3606 AArch64ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3607 3608private: 3609 // The AArch64 PCS is explicit about return types and argument types being 3610 // handled identically, so we don't need to draw a distinction between 3611 // Argument and Return classification. 
3612   ABIArgInfo classifyGenericType(QualType Ty, int &FreeIntRegs,
3613                                  int &FreeVFPRegs) const;
3614 
3615   ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt,
3616                         llvm::Type *DirectTy = 0) const;
3617 
3618   virtual void computeInfo(CGFunctionInfo &FI) const;
3619 
3620   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3621                                  CodeGenFunction &CGF) const;
3622 };
3623 
3624 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
3625 public:
3626   AArch64TargetCodeGenInfo(CodeGenTypes &CGT)
3627     :TargetCodeGenInfo(new AArch64ABIInfo(CGT)) {}
3628 
3629   const AArch64ABIInfo &getABIInfo() const {
3630     return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
3631   }
3632 
3633   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3634     return 31;
3635   }
3636 
3637   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3638                                llvm::Value *Address) const {
3639     // 0-31 are x0-x30 and sp: 8 bytes each
3640     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
3641     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31);
3642 
3643     // 64-95 are v0-v31: 16 bytes each
3644     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
3645     AssignToArrayRange(CGF.Builder, Address, Sixteen8, 64, 95);
3646 
3647     return false;
3648   }
3649 
3650 };
3651 
3652 }
3653 
3654 void AArch64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3655   int FreeIntRegs = 8, FreeVFPRegs = 8;
3656 
3657   FI.getReturnInfo() = classifyGenericType(FI.getReturnType(),
3658                                            FreeIntRegs, FreeVFPRegs);
3659 
3660   FreeIntRegs = FreeVFPRegs = 8;
3661   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3662        it != ie; ++it) {
3663     it->info = classifyGenericType(it->type, FreeIntRegs, FreeVFPRegs);
3664 
3665   }
3666 }
3667 
3668 ABIArgInfo
3669 AArch64ABIInfo::tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded,
3670                            bool IsInt, llvm::Type *DirectTy) const {
3671   if (FreeRegs >= RegsNeeded) {
3672     FreeRegs -= RegsNeeded;
3673     return ABIArgInfo::getDirect(DirectTy);
3674   }
3675 
3676   llvm::Type *Padding = 0;
3677 
3678   // We need padding so that later arguments don't get filled in anyway. That
3679   // wouldn't happen if only ByVal arguments followed in the same category, but
3680   // a large structure will simply seem to be a pointer as far as LLVM is
3681   // concerned.
3682   if (FreeRegs > 0) {
3683     if (IsInt)
3684       Padding = llvm::Type::getInt64Ty(getVMContext());
3685     else
3686       Padding = llvm::Type::getFloatTy(getVMContext());
3687 
3688     // Either [N x i64] or [N x float].
3689     Padding = llvm::ArrayType::get(Padding, FreeRegs);
3690     FreeRegs = 0;
3691   }
3692 
3693   return ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty) / 8,
3694                                  /*IsByVal=*/ true, /*Realign=*/ false,
3695                                  Padding);
3696 }
3697 
3698 
3699 ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
3700                                                int &FreeIntRegs,
3701                                                int &FreeVFPRegs) const {
3702   // Can only occur for return types, but harmless otherwise.
3703   if (Ty->isVoidType())
3704     return ABIArgInfo::getIgnore();
3705 
3706   // Large vector types should be returned via memory. There's no such concept
3707   // in the ABI, but they'd be over 16 bytes anyway so no matter how they're
3708   // classified they'd go into memory (see B.3).
3709   if (Ty->isVectorType() && getContext().getTypeSize(Ty) > 128) {
3710     if (FreeIntRegs > 0)
3711       --FreeIntRegs;
3712     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3713   }
3714 
3715   // All non-aggregate LLVM types have a concrete ABI representation so they can
3716   // be passed directly. After this block we're guaranteed to be in a
3717   // complicated case.
3718   if (!isAggregateTypeForABI(Ty)) {
3719     // Treat an enum type as its underlying type.
3720     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3721       Ty = EnumTy->getDecl()->getIntegerType();
3722 
3723     if (Ty->isFloatingType() || Ty->isVectorType())
3724       return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ false);
3725 
3726     assert(getContext().getTypeSize(Ty) <= 128 &&
3727            "unexpectedly large scalar type");
3728 
3729     int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
3730 
3731     // If the type may need padding registers to ensure "alignment", we must be
3732     // careful when this is accounted for. Increasing the effective size covers
3733     // all cases.
3734     if (getContext().getTypeAlign(Ty) == 128)
3735       RegsNeeded += FreeIntRegs % 2 != 0;
3736 
3737     return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true);
3738   }
3739 
3740   // Structures with either a non-trivial destructor or a non-trivial
3741   // copy constructor are always indirect.
3742   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
3743     if (FreeIntRegs > 0)
3744       --FreeIntRegs;
3745     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3746   }
3747 
3748   if (isEmptyRecord(getContext(), Ty, true)) {
3749     if (!getContext().getLangOpts().CPlusPlus) {
3750       // Empty structs outside C++ mode are a GNU extension, so no ABI can
3751       // possibly tell us what to do. It turns out (I believe) that GCC ignores
3752       // the object for parameter-passing purposes.
3753       return ABIArgInfo::getIgnore();
3754     }
3755 
3756     // The combination of C++98 9p5 (sizeof(struct) != 0) and the pseudocode
3757     // description of va_arg in the PCS requires that an empty struct does
3758     // actually occupy space for parameter-passing. I'm hoping for a
3759     // clarification giving an explicit paragraph to point to in future.
3760     return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ true,
3761                       llvm::Type::getInt8Ty(getVMContext()));
3762   }
3763 
3764   // Homogeneous vector aggregates get passed in registers or on the stack.
3765   const Type *Base = 0;
3766   uint64_t NumMembers = 0;
3767   if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)) {
3768     assert(Base && "Base class should be set for homogeneous aggregate");
3769     // Homogeneous aggregates are passed and returned directly.
3770     return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ NumMembers,
3771                       /*IsInt=*/ false);
3772   }
3773 
3774   uint64_t Size = getContext().getTypeSize(Ty);
3775   if (Size <= 128) {
3776     // Small structs can use the same direct type whether they're in registers
3777     // or on the stack.
3778     llvm::Type *BaseTy;
3779     unsigned NumBases;
3780     int SizeInRegs = (Size + 63) / 64;
3781 
3782     if (getContext().getTypeAlign(Ty) == 128) {
3783       BaseTy = llvm::Type::getIntNTy(getVMContext(), 128);
3784       NumBases = 1;
3785 
3786       // If the type may need padding registers to ensure "alignment", we must
3787       // be careful when this is accounted for. Increasing the effective size
3788       // covers all cases.
3789       SizeInRegs += FreeIntRegs % 2 != 0;
3790     } else {
3791       BaseTy = llvm::Type::getInt64Ty(getVMContext());
3792       NumBases = SizeInRegs;
3793     }
3794     llvm::Type *DirectTy = llvm::ArrayType::get(BaseTy, NumBases);
3795 
3796     return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ SizeInRegs,
3797                       /*IsInt=*/ true, DirectTy);
3798   }
3799 
3800   // If the aggregate is > 16 bytes, it's passed and returned indirectly. In
3801   // LLVM terms the return uses an "sret" pointer, but that's handled elsewhere.
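  // For example (sketch): "struct P { void *a, *b, *c; }" is 24 bytes under
  // AAPCS64, so it is passed as a pointer to a caller-allocated copy; the
  // pointer itself occupies one of x0-x7, hence the decrement below.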
3802   --FreeIntRegs;
3803   return ABIArgInfo::getIndirect(0, /* byVal = */ false);
3804 }
3805 
3806 llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3807                                        CodeGenFunction &CGF) const {
3808   // The AArch64 va_list type and handling is specified in the Procedure Call
3809   // Standard, section B.4:
3810   //
3811   // struct {
3812   //   void *__stack;
3813   //   void *__gr_top;
3814   //   void *__vr_top;
3815   //   int __gr_offs;
3816   //   int __vr_offs;
3817   // };
3818 
3819   assert(!CGF.CGM.getDataLayout().isBigEndian()
3820          && "va_arg not implemented for big-endian AArch64");
3821 
3822   int FreeIntRegs = 8, FreeVFPRegs = 8;
3823   Ty = CGF.getContext().getCanonicalType(Ty);
3824   ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs);
3825 
3826   llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
3827   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3828   llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
3829   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3830 
3831   llvm::Value *reg_offs_p = 0, *reg_offs = 0;
3832   int reg_top_index;
3833   int RegSize;
3834   if (FreeIntRegs < 8) {
3835     assert(FreeVFPRegs == 8 && "Arguments never split between int & VFP regs");
3836     // 3 is the field number of __gr_offs
3837     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
3838     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
3839     reg_top_index = 1; // field number for __gr_top
3840     RegSize = 8 * (8 - FreeIntRegs);
3841   } else {
3842     assert(FreeVFPRegs < 8 && "Argument must go in VFP or int regs");
3843     // 4 is the field number of __vr_offs.
3844     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
3845     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
3846     reg_top_index = 2; // field number for __vr_top
3847     RegSize = 16 * (8 - FreeVFPRegs);
3848   }
3849 
3850   //=======================================
3851   // Find out where argument was passed
3852   //=======================================
3853 
3854   // If reg_offs >= 0 we're already using the stack for this type of
3855   // argument. We don't want to keep updating reg_offs (in case it overflows,
3856   // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
3857   // whatever they get).
3858   llvm::Value *UsingStack = 0;
3859   UsingStack = CGF.Builder.CreateICmpSGE(reg_offs,
3860                                          llvm::ConstantInt::get(CGF.Int32Ty, 0));
3861 
3862   CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
3863 
3864   // Otherwise, at least some kind of argument could go in these registers; the
3865   // question is whether this particular type is too big.
3866   CGF.EmitBlock(MaybeRegBlock);
3867 
3868   // Integer arguments may need their register alignment corrected (for example
3869   // a "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
3870   // align __gr_offs to calculate the potential address.
3871   if (FreeIntRegs < 8 && AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
3872     int Align = getContext().getTypeAlign(Ty) / 8;
3873 
3874     reg_offs = CGF.Builder.CreateAdd(reg_offs,
3875                                      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
3876                                      "align_regoffs");
3877     reg_offs = CGF.Builder.CreateAnd(reg_offs,
3878                                      llvm::ConstantInt::get(CGF.Int32Ty, -Align),
3879                                      "aligned_regoffs");
3880   }
3881 
3882   // Update the gr_offs/vr_offs pointer for the next call to va_arg on this va_list.
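  // Sketch of the mechanism used below (added commentary): __gr_offs and
  // __vr_offs start out negative while arguments remain in the
  // register-save area and count up towards zero. If the offset is still
  // <= 0 after adding this argument's RegSize, the value was spilled from
  // a register; otherwise it lives in the ordinary stack argument area.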
3883   llvm::Value *NewOffset = 0;
3884   NewOffset = CGF.Builder.CreateAdd(reg_offs,
3885                                     llvm::ConstantInt::get(CGF.Int32Ty, RegSize),
3886                                     "new_reg_offs");
3887   CGF.Builder.CreateStore(NewOffset, reg_offs_p);
3888 
3889   // Now we're in a position to decide whether this argument really was in
3890   // registers or not.
3891   llvm::Value *InRegs = 0;
3892   InRegs = CGF.Builder.CreateICmpSLE(NewOffset,
3893                                      llvm::ConstantInt::get(CGF.Int32Ty, 0),
3894                                      "inreg");
3895 
3896   CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
3897 
3898   //=======================================
3899   // Argument was in registers
3900   //=======================================
3901 
3902   // Now we emit the code for the case where the argument was originally
3903   // passed in registers. First start the appropriate block:
3904   CGF.EmitBlock(InRegBlock);
3905 
3906   llvm::Value *reg_top_p = 0, *reg_top = 0;
3907   reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
3908   reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
3909   llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
3910   llvm::Value *RegAddr = 0;
3911   llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
3912 
3913   if (!AI.isDirect()) {
3914     // If it's been passed indirectly (actually a struct), whatever we find from
3915     // stored registers or on the stack will actually be a struct **.
3916     MemTy = llvm::PointerType::getUnqual(MemTy);
3917   }
3918 
3919   const Type *Base = 0;
3920   uint64_t NumMembers;
3921   if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)
3922       && NumMembers > 1) {
3923     // Homogeneous aggregates passed in registers will have their elements split
3924     // and stored 16 bytes apart regardless of size (they're notionally in qN,
3925     // qN+1, ...). We reload and store into a temporary local variable
3926     // contiguously.
3927     assert(AI.isDirect() && "Homogeneous aggregates should be passed directly");
3928     llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
3929     llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
3930     llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
3931 
3932     for (unsigned i = 0; i < NumMembers; ++i) {
3933       llvm::Value *BaseOffset = llvm::ConstantInt::get(CGF.Int32Ty, 16 * i);
3934       llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
3935       LoadAddr = CGF.Builder.CreateBitCast(LoadAddr,
3936                                            llvm::PointerType::getUnqual(BaseTy));
3937       llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
3938 
3939       llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
3940       CGF.Builder.CreateStore(Elem, StoreAddr);
3941     }
3942 
3943     RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
3944   } else {
3945     // Otherwise the object is contiguous in memory.
3946     RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
3947   }
3948 
3949   CGF.EmitBranch(ContBlock);
3950 
3951   //=======================================
3952   // Argument was on the stack
3953   //=======================================
3954   CGF.EmitBlock(OnStackBlock);
3955 
3956   llvm::Value *stack_p = 0, *OnStackAddr = 0;
3957   stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
3958   OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
3959 
3960   // Again, stack arguments may need realignment. In this case both integer and
3961   // floating-point ones might be affected.
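  // The address computation below is the usual power-of-two align-up trick
  // (illustration, added commentary):
  //   aligned = (addr + Align - 1) & -Align;
  // e.g. addr = 0x1001, Align = 16: (0x1001 + 15) & ~15 == 0x1010.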
3962 if (AI.isDirect() && getContext().getTypeAlign(Ty) > 64) { 3963 int Align = getContext().getTypeAlign(Ty) / 8; 3964 3965 OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty); 3966 3967 OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr, 3968 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 3969 "align_stack"); 3970 OnStackAddr = CGF.Builder.CreateAnd(OnStackAddr, 3971 llvm::ConstantInt::get(CGF.Int64Ty, -Align), 3972 "align_stack"); 3973 3974 OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy); 3975 } 3976 3977 uint64_t StackSize; 3978 if (AI.isDirect()) 3979 StackSize = getContext().getTypeSize(Ty) / 8; 3980 else 3981 StackSize = 8; 3982 3983 // All stack slots are 8 bytes 3984 StackSize = llvm::RoundUpToAlignment(StackSize, 8); 3985 3986 llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize); 3987 llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, 3988 "new_stack"); 3989 3990 // Write the new value of __stack for the next call to va_arg 3991 CGF.Builder.CreateStore(NewStack, stack_p); 3992 3993 OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy); 3994 3995 CGF.EmitBranch(ContBlock); 3996 3997 //======================================= 3998 // Tidy up 3999 //======================================= 4000 CGF.EmitBlock(ContBlock); 4001 4002 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr"); 4003 ResAddr->addIncoming(RegAddr, InRegBlock); 4004 ResAddr->addIncoming(OnStackAddr, OnStackBlock); 4005 4006 if (AI.isDirect()) 4007 return ResAddr; 4008 4009 return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"); 4010} 4011 4012//===----------------------------------------------------------------------===// 4013// NVPTX ABI Implementation 4014//===----------------------------------------------------------------------===// 4015 4016namespace { 4017 4018class NVPTXABIInfo : public ABIInfo { 4019public: 4020 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4021 4022 ABIArgInfo classifyReturnType(QualType RetTy) const; 4023 ABIArgInfo classifyArgumentType(QualType Ty) const; 4024 4025 virtual void computeInfo(CGFunctionInfo &FI) const; 4026 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4027 CodeGenFunction &CFG) const; 4028}; 4029 4030class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 4031public: 4032 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 4033 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 4034 4035 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4036 CodeGen::CodeGenModule &M) const; 4037private: 4038 static void addKernelMetadata(llvm::Function *F); 4039}; 4040 4041ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 4042 if (RetTy->isVoidType()) 4043 return ABIArgInfo::getIgnore(); 4044 if (isAggregateTypeForABI(RetTy)) 4045 return ABIArgInfo::getIndirect(0); 4046 return ABIArgInfo::getDirect(); 4047} 4048 4049ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 4050 if (isAggregateTypeForABI(Ty)) 4051 return ABIArgInfo::getIndirect(0); 4052 4053 return ABIArgInfo::getDirect(); 4054} 4055 4056void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 4057 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4058 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4059 it != ie; ++it) 4060 it->info = classifyArgumentType(it->type); 4061 4062 // Always honor user-specified calling convention. 
4063 if (FI.getCallingConvention() != llvm::CallingConv::C) 4064 return; 4065 4066 FI.setEffectiveCallingConvention(getRuntimeCC()); 4067} 4068 4069llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4070 CodeGenFunction &CFG) const { 4071 llvm_unreachable("NVPTX does not support varargs"); 4072} 4073 4074void NVPTXTargetCodeGenInfo:: 4075SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4076 CodeGen::CodeGenModule &M) const{ 4077 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4078 if (!FD) return; 4079 4080 llvm::Function *F = cast<llvm::Function>(GV); 4081 4082 // Perform special handling in OpenCL mode 4083 if (M.getLangOpts().OpenCL) { 4084 // Use OpenCL function attributes to check for kernel functions 4085 // By default, all functions are device functions 4086 if (FD->hasAttr<OpenCLKernelAttr>()) { 4087 // OpenCL __kernel functions get kernel metadata 4088 addKernelMetadata(F); 4089 // And kernel functions are not subject to inlining 4090 F->addFnAttr(llvm::Attribute::NoInline); 4091 } 4092 } 4093 4094 // Perform special handling in CUDA mode. 4095 if (M.getLangOpts().CUDA) { 4096 // CUDA __global__ functions get a kernel metadata entry. Since 4097 // __global__ functions cannot be called from the device, we do not 4098 // need to set the noinline attribute. 4099 if (FD->getAttr<CUDAGlobalAttr>()) 4100 addKernelMetadata(F); 4101 } 4102} 4103 4104void NVPTXTargetCodeGenInfo::addKernelMetadata(llvm::Function *F) { 4105 llvm::Module *M = F->getParent(); 4106 llvm::LLVMContext &Ctx = M->getContext(); 4107 4108 // Get "nvvm.annotations" metadata node 4109 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 4110 4111 // Create !{<func-ref>, metadata !"kernel", i32 1} node 4112 llvm::SmallVector<llvm::Value *, 3> MDVals; 4113 MDVals.push_back(F); 4114 MDVals.push_back(llvm::MDString::get(Ctx, "kernel")); 4115 MDVals.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1)); 4116 4117 // Append metadata to nvvm.annotations 4118 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 4119} 4120 4121} 4122 4123//===----------------------------------------------------------------------===// 4124// MBlaze ABI Implementation 4125//===----------------------------------------------------------------------===// 4126 4127namespace { 4128 4129class MBlazeABIInfo : public ABIInfo { 4130public: 4131 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4132 4133 bool isPromotableIntegerType(QualType Ty) const; 4134 4135 ABIArgInfo classifyReturnType(QualType RetTy) const; 4136 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4137 4138 virtual void computeInfo(CGFunctionInfo &FI) const { 4139 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4140 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4141 it != ie; ++it) 4142 it->info = classifyArgumentType(it->type); 4143 } 4144 4145 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4146 CodeGenFunction &CGF) const; 4147}; 4148 4149class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 4150public: 4151 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 4152 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 4153 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4154 CodeGen::CodeGenModule &M) const; 4155}; 4156 4157} 4158 4159bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 4160 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 
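  // Illustration (not exhaustive): an 'unsigned char' argument is widened
  // to the 32-bit register width with zero-extension and a 'short' with
  // sign-extension (both via ABIArgInfo::getExtend()), whereas an 'int'
  // already fills a register and stays Direct.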
4161 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4162 switch (BT->getKind()) { 4163 case BuiltinType::Bool: 4164 case BuiltinType::Char_S: 4165 case BuiltinType::Char_U: 4166 case BuiltinType::SChar: 4167 case BuiltinType::UChar: 4168 case BuiltinType::Short: 4169 case BuiltinType::UShort: 4170 return true; 4171 default: 4172 return false; 4173 } 4174 return false; 4175} 4176 4177llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4178 CodeGenFunction &CGF) const { 4179 // FIXME: Implement 4180 return 0; 4181} 4182 4183 4184ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 4185 if (RetTy->isVoidType()) 4186 return ABIArgInfo::getIgnore(); 4187 if (isAggregateTypeForABI(RetTy)) 4188 return ABIArgInfo::getIndirect(0); 4189 4190 return (isPromotableIntegerType(RetTy) ? 4191 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4192} 4193 4194ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 4195 if (isAggregateTypeForABI(Ty)) 4196 return ABIArgInfo::getIndirect(0); 4197 4198 return (isPromotableIntegerType(Ty) ? 4199 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4200} 4201 4202void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 4203 llvm::GlobalValue *GV, 4204 CodeGen::CodeGenModule &M) 4205 const { 4206 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4207 if (!FD) return; 4208 4209 llvm::CallingConv::ID CC = llvm::CallingConv::C; 4210 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 4211 CC = llvm::CallingConv::MBLAZE_INTR; 4212 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 4213 CC = llvm::CallingConv::MBLAZE_SVOL; 4214 4215 if (CC != llvm::CallingConv::C) { 4216 // Handle 'interrupt_handler' attribute: 4217 llvm::Function *F = cast<llvm::Function>(GV); 4218 4219 // Step 1: Set ISR calling convention. 4220 F->setCallingConv(CC); 4221 4222 // Step 2: Add attributes goodness. 4223 F->addFnAttr(llvm::Attribute::NoInline); 4224 } 4225 4226 // Step 3: Emit _interrupt_handler alias. 4227 if (CC == llvm::CallingConv::MBLAZE_INTR) 4228 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 4229 "_interrupt_handler", GV, &M.getModule()); 4230} 4231 4232 4233//===----------------------------------------------------------------------===// 4234// MSP430 ABI Implementation 4235//===----------------------------------------------------------------------===// 4236 4237namespace { 4238 4239class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 4240public: 4241 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 4242 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 4243 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4244 CodeGen::CodeGenModule &M) const; 4245}; 4246 4247} 4248 4249void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 4250 llvm::GlobalValue *GV, 4251 CodeGen::CodeGenModule &M) const { 4252 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 4253 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 4254 // Handle 'interrupt' attribute: 4255 llvm::Function *F = cast<llvm::Function>(GV); 4256 4257 // Step 1: Set ISR calling convention. 4258 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 4259 4260 // Step 2: Add attributes goodness. 4261 F->addFnAttr(llvm::Attribute::NoInline); 4262 4263 // Step 3: Emit ISR vector alias. 
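      // (Sketch) For "__attribute__((interrupt(4)))" this emits an alias
      // named "__isr_2" for the handler; the attribute's number is halved
      // below, presumably because vector-table entries are two bytes apart.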
4264 unsigned Num = attr->getNumber() / 2; 4265 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 4266 "__isr_" + Twine(Num), 4267 GV, &M.getModule()); 4268 } 4269 } 4270} 4271 4272//===----------------------------------------------------------------------===// 4273// MIPS ABI Implementation. This works for both little-endian and 4274// big-endian variants. 4275//===----------------------------------------------------------------------===// 4276 4277namespace { 4278class MipsABIInfo : public ABIInfo { 4279 bool IsO32; 4280 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 4281 void CoerceToIntArgs(uint64_t TySize, 4282 SmallVector<llvm::Type*, 8> &ArgList) const; 4283 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 4284 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 4285 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 4286public: 4287 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 4288 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 4289 StackAlignInBytes(IsO32 ? 8 : 16) {} 4290 4291 ABIArgInfo classifyReturnType(QualType RetTy) const; 4292 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 4293 virtual void computeInfo(CGFunctionInfo &FI) const; 4294 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4295 CodeGenFunction &CGF) const; 4296}; 4297 4298class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 4299 unsigned SizeOfUnwindException; 4300public: 4301 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 4302 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 4303 SizeOfUnwindException(IsO32 ? 24 : 32) {} 4304 4305 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 4306 return 29; 4307 } 4308 4309 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4310 CodeGen::CodeGenModule &CGM) const { 4311 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4312 if (!FD) return; 4313 llvm::Function *Fn = cast<llvm::Function>(GV); 4314 if (FD->hasAttr<Mips16Attr>()) { 4315 Fn->addFnAttr("mips16"); 4316 } 4317 else if (FD->hasAttr<NoMips16Attr>()) { 4318 Fn->addFnAttr("nomips16"); 4319 } 4320 } 4321 4322 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4323 llvm::Value *Address) const; 4324 4325 unsigned getSizeOfUnwindException() const { 4326 return SizeOfUnwindException; 4327 } 4328}; 4329} 4330 4331void MipsABIInfo::CoerceToIntArgs(uint64_t TySize, 4332 SmallVector<llvm::Type*, 8> &ArgList) const { 4333 llvm::IntegerType *IntTy = 4334 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 4335 4336 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 4337 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 4338 ArgList.push_back(IntTy); 4339 4340 // If necessary, add one more integer type to ArgList. 4341 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 4342 4343 if (R) 4344 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 4345} 4346 4347// In N32/64, an aligned double precision floating point field is passed in 4348// a register. 
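// For example (sketch, N64): given "struct S { int i; double d; }", the
// double is 64-bit aligned at offset 64, so HandleAggregates() below
// coerces S to { i64, double } and 'd' can travel in a floating-point
// register instead of being smuggled through a general-purpose one.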
4349llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 4350 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 4351 4352 if (IsO32) { 4353 CoerceToIntArgs(TySize, ArgList); 4354 return llvm::StructType::get(getVMContext(), ArgList); 4355 } 4356 4357 if (Ty->isComplexType()) 4358 return CGT.ConvertType(Ty); 4359 4360 const RecordType *RT = Ty->getAs<RecordType>(); 4361 4362 // Unions/vectors are passed in integer registers. 4363 if (!RT || !RT->isStructureOrClassType()) { 4364 CoerceToIntArgs(TySize, ArgList); 4365 return llvm::StructType::get(getVMContext(), ArgList); 4366 } 4367 4368 const RecordDecl *RD = RT->getDecl(); 4369 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 4370 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 4371 4372 uint64_t LastOffset = 0; 4373 unsigned idx = 0; 4374 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 4375 4376 // Iterate over fields in the struct/class and check if there are any aligned 4377 // double fields. 4378 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 4379 i != e; ++i, ++idx) { 4380 const QualType Ty = i->getType(); 4381 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 4382 4383 if (!BT || BT->getKind() != BuiltinType::Double) 4384 continue; 4385 4386 uint64_t Offset = Layout.getFieldOffset(idx); 4387 if (Offset % 64) // Ignore doubles that are not aligned. 4388 continue; 4389 4390 // Add ((Offset - LastOffset) / 64) args of type i64. 4391 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 4392 ArgList.push_back(I64); 4393 4394 // Add double type. 4395 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 4396 LastOffset = Offset + 64; 4397 } 4398 4399 CoerceToIntArgs(TySize - LastOffset, IntArgList); 4400 ArgList.append(IntArgList.begin(), IntArgList.end()); 4401 4402 return llvm::StructType::get(getVMContext(), ArgList); 4403} 4404 4405llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const { 4406 assert((Offset % MinABIStackAlignInBytes) == 0); 4407 4408 if ((Align - 1) & Offset) 4409 return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 4410 4411 return 0; 4412} 4413 4414ABIArgInfo 4415MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 4416 uint64_t OrigOffset = Offset; 4417 uint64_t TySize = getContext().getTypeSize(Ty); 4418 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 4419 4420 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 4421 (uint64_t)StackAlignInBytes); 4422 Offset = llvm::RoundUpToAlignment(Offset, Align); 4423 Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 4424 4425 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 4426 // Ignore empty aggregates. 4427 if (TySize == 0) 4428 return ABIArgInfo::getIgnore(); 4429 4430 // Records with non trivial destructors/constructors should not be passed 4431 // by value. 4432 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) { 4433 Offset = OrigOffset + MinABIStackAlignInBytes; 4434 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4435 } 4436 4437 // If we have reached here, aggregates are passed directly by coercing to 4438 // another structure type. Padding is inserted if the offset of the 4439 // aggregate is unaligned. 4440 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 4441 getPaddingType(Align, OrigOffset)); 4442 } 4443 4444 // Treat an enum type as its underlying type. 
4445 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4446 Ty = EnumTy->getDecl()->getIntegerType(); 4447 4448 if (Ty->isPromotableIntegerType()) 4449 return ABIArgInfo::getExtend(); 4450 4451 return ABIArgInfo::getDirect(0, 0, 4452 IsO32 ? 0 : getPaddingType(Align, OrigOffset)); 4453} 4454 4455llvm::Type* 4456MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 4457 const RecordType *RT = RetTy->getAs<RecordType>(); 4458 SmallVector<llvm::Type*, 8> RTList; 4459 4460 if (RT && RT->isStructureOrClassType()) { 4461 const RecordDecl *RD = RT->getDecl(); 4462 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 4463 unsigned FieldCnt = Layout.getFieldCount(); 4464 4465 // N32/64 returns struct/classes in floating point registers if the 4466 // following conditions are met: 4467 // 1. The size of the struct/class is no larger than 128-bit. 4468 // 2. The struct/class has one or two fields all of which are floating 4469 // point types. 4470 // 3. The offset of the first field is zero (this follows what gcc does). 4471 // 4472 // Any other composite results are returned in integer registers. 4473 // 4474 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 4475 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 4476 for (; b != e; ++b) { 4477 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 4478 4479 if (!BT || !BT->isFloatingPoint()) 4480 break; 4481 4482 RTList.push_back(CGT.ConvertType(b->getType())); 4483 } 4484 4485 if (b == e) 4486 return llvm::StructType::get(getVMContext(), RTList, 4487 RD->hasAttr<PackedAttr>()); 4488 4489 RTList.clear(); 4490 } 4491 } 4492 4493 CoerceToIntArgs(Size, RTList); 4494 return llvm::StructType::get(getVMContext(), RTList); 4495} 4496 4497ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 4498 uint64_t Size = getContext().getTypeSize(RetTy); 4499 4500 if (RetTy->isVoidType() || Size == 0) 4501 return ABIArgInfo::getIgnore(); 4502 4503 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 4504 if (Size <= 128) { 4505 if (RetTy->isAnyComplexType()) 4506 return ABIArgInfo::getDirect(); 4507 4508 // O32 returns integer vectors in registers. 4509 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 4510 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 4511 4512 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 4513 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 4514 } 4515 4516 return ABIArgInfo::getIndirect(0); 4517 } 4518 4519 // Treat an enum type as its underlying type. 4520 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4521 RetTy = EnumTy->getDecl()->getIntegerType(); 4522 4523 return (RetTy->isPromotableIntegerType() ? 4524 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4525} 4526 4527void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 4528 ABIArgInfo &RetInfo = FI.getReturnInfo(); 4529 RetInfo = classifyReturnType(FI.getReturnType()); 4530 4531 // Check if a pointer to an aggregate is passed as a hidden argument. 4532 uint64_t Offset = RetInfo.isIndirect() ? 
MinABIStackAlignInBytes : 0;
4533 
4534   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4535        it != ie; ++it)
4536     it->info = classifyArgumentType(it->type, Offset);
4537 }
4538 
4539 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4540                                     CodeGenFunction &CGF) const {
4541   llvm::Type *BP = CGF.Int8PtrTy;
4542   llvm::Type *BPP = CGF.Int8PtrPtrTy;
4543 
4544   CGBuilderTy &Builder = CGF.Builder;
4545   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
4546   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
4547   int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
4548   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4549   llvm::Value *AddrTyped;
4550   unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
4551   llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
4552 
4553   if (TypeAlign > MinABIStackAlignInBytes) {
4554     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
4555     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
4556     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
4557     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
4558     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
4559     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
4560   }
4561   else
4562     AddrTyped = Builder.CreateBitCast(Addr, PTy);
4563 
4564   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
4565   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
4566   uint64_t Offset =
4567     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
4568   llvm::Value *NextAddr =
4569     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
4570                       "ap.next");
4571   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
4572 
4573   return AddrTyped;
4574 }
4575 
4576 bool
4577 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4578                                                llvm::Value *Address) const {
4579   // This information comes from gcc's implementation, which seems to be
4580   // as canonical as it gets.
4581 
4582   // Everything on MIPS is 4 bytes. Double-precision FP registers
4583   // are aliased to pairs of single-precision FP registers.
4584   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
4585 
4586   // 0-31 are the general purpose registers, $0 - $31.
4587   // 32-63 are the floating-point registers, $f0 - $f31.
4588   // 64 and 65 are the multiply/divide registers, $hi and $lo.
4589   // 66 is the (notional, I think) register for signal-handler return.
4590   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
4591 
4592   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
4593   // They are one bit wide and ignored here.
4594 
4595   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
4596   // (coprocessor 1 is the FP unit)
4597   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
4598   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
4599   // 176-181 are the DSP accumulator registers.
4600   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
4601   return false;
4602 }
4603 
4604 //===----------------------------------------------------------------------===//
4605 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
4606 // Currently subclassed only to implement custom OpenCL C function attribute
4607 // handling.
4608//===----------------------------------------------------------------------===// 4609 4610namespace { 4611 4612class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 4613public: 4614 TCETargetCodeGenInfo(CodeGenTypes &CGT) 4615 : DefaultTargetCodeGenInfo(CGT) {} 4616 4617 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4618 CodeGen::CodeGenModule &M) const; 4619}; 4620 4621void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 4622 llvm::GlobalValue *GV, 4623 CodeGen::CodeGenModule &M) const { 4624 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4625 if (!FD) return; 4626 4627 llvm::Function *F = cast<llvm::Function>(GV); 4628 4629 if (M.getLangOpts().OpenCL) { 4630 if (FD->hasAttr<OpenCLKernelAttr>()) { 4631 // OpenCL C Kernel functions are not subject to inlining 4632 F->addFnAttr(llvm::Attribute::NoInline); 4633 4634 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) { 4635 4636 // Convert the reqd_work_group_size() attributes to metadata. 4637 llvm::LLVMContext &Context = F->getContext(); 4638 llvm::NamedMDNode *OpenCLMetadata = 4639 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 4640 4641 SmallVector<llvm::Value*, 5> Operands; 4642 Operands.push_back(F); 4643 4644 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 4645 llvm::APInt(32, 4646 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim()))); 4647 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 4648 llvm::APInt(32, 4649 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim()))); 4650 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 4651 llvm::APInt(32, 4652 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim()))); 4653 4654 // Add a boolean constant operand for "required" (true) or "hint" (false) 4655 // for implementing the work_group_size_hint attr later. Currently 4656 // always true as the hint is not yet implemented. 4657 Operands.push_back(llvm::ConstantInt::getTrue(Context)); 4658 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 4659 } 4660 } 4661 } 4662} 4663 4664} 4665 4666//===----------------------------------------------------------------------===// 4667// Hexagon ABI Implementation 4668//===----------------------------------------------------------------------===// 4669 4670namespace { 4671 4672class HexagonABIInfo : public ABIInfo { 4673 4674 4675public: 4676 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4677 4678private: 4679 4680 ABIArgInfo classifyReturnType(QualType RetTy) const; 4681 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4682 4683 virtual void computeInfo(CGFunctionInfo &FI) const; 4684 4685 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4686 CodeGenFunction &CGF) const; 4687}; 4688 4689class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 4690public: 4691 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 4692 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 4693 4694 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 4695 return 29; 4696 } 4697}; 4698 4699} 4700 4701void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 4702 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4703 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4704 it != ie; ++it) 4705 it->info = classifyArgumentType(it->type); 4706} 4707 4708ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 4709 if (!isAggregateTypeForABI(Ty)) { 4710 // Treat an enum type as its underlying type. 
//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 29;
  }
};

}

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
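// Worked examples of the rules above, assuming 8-bit bytes and trivial
// C structs (hypothetical types, shown only to make the size thresholds
// concrete):
//
//   struct S3  { char c[3]; };  // 24 bits -> getDirect(i32)
//   struct S6  { char c[6]; };  // 48 bits -> getDirect(i64)
//   struct S12 { int  a[3]; };  // 96 bits -> getIndirect(0, /*ByVal=*/true)
//
// Small aggregates are coerced to the smallest integer type that can hold
// them; anything wider than 64 bits is passed in memory.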
ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always returned indirectly.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}

llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment.
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
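// The va_arg lowering above is equivalent to this C sketch (illustrative
// only; per the FIXME, types needing more than 4-byte alignment are not
// yet handled specially):
//
//   void *hexagon_va_arg(char **AP, size_t Size) {
//     void *Cur = *AP;                  // "ap.cur"
//     *AP += (Size + 3) & ~(size_t)3;   // "ap.next": advance in 4-byte slots
//     return Cur;
//   }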
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    {
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      switch (Triple.getOS()) {
      case llvm::Triple::NaCl:
        return *(TheTargetCodeGenInfo =
                 new NaClARMTargetCodeGenInfo(Types, Kind));
      default:
        return *(TheTargetCodeGenInfo =
                 new ARMTargetCodeGenInfo(Types, Kind));
      }
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF())
      return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
    else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::mblaze:
    return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    if (Triple.isOSDarwin())
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, true, true, false,
                                           CodeGenOpts.NumRegisterParameters));

    switch (Triple.getOS()) {
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
    case llvm::Triple::Bitrig:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true, false,
                                           CodeGenOpts.NumRegisterParameters));

    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true, true,
                                           CodeGenOpts.NumRegisterParameters));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, false, false,
                                           CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
    case llvm::Triple::MinGW32:
    case llvm::Triple::Cygwin:
      return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
                                                                      HasAVX));
    default:
      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
                                                                  HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  }
}
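// A few concrete triple-to-info mappings implied by the switch above
// (reference examples, not exhaustive):
//
//   x86_64-apple-darwin   -> X86_64TargetCodeGenInfo
//   x86_64-pc-win32       -> WinX86_64TargetCodeGenInfo
//   i386-apple-darwin     -> X86_32TargetCodeGenInfo (Darwin vector ABI)
//   armv7-linux-gnueabihf -> ARMTargetCodeGenInfo (AAPCS_VFP, given a
//                            non-soft float ABI)
//   mipsel-linux-gnu      -> MIPSTargetCodeGenInfo(Types, true)
//
// The chosen object is cached in TheTargetCodeGenInfo, so the switch runs
// at most once per CodeGenModule.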