TargetInfo.cpp revision 0d5833921cc728bc1d2e45fbaf7b3e11cddbf99d
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                      const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
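  // A minimal illustration (hypothetical types, not from this file): given
  //   struct Empty {};
  //   struct S { struct Empty pad[4]; };
  // the field 'pad' is "empty" when AllowArrays is set, because the loop
  // below strips the array type and the record check then runs on
  // 'struct Empty' itself.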
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
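// A minimal illustration of the predicate above (hypothetical C types, not
// from this file):
//   struct P { int x; int y; };    // expandable: two 32-bit basic fields,
//                                  // and the field sizes sum to sizeof(P)
//   struct Q { short s; int i; };  // not expandable: 'short' is not a
//                                  // 32- or 64-bit basic type, and padding
//                                  // would leave a hole (16 + 32 != 64 bits)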

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
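
// To illustrate the default classification above (a summary of the code, not
// new behavior): promotable integers such as 'char' and 'short' get Extend,
// 'int' and pointers get Direct, and any aggregate is passed Indirect; e.g.
// for 'void f(char c, int i, struct S s)' only 's' goes through memory.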

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &FreeRegs) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
                                              unsigned &FreeRegs) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    FreeRegs = 0;
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
                         ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  // Regparm regs hold 32 bits.
  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs == 0) return BaseInfo;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return BaseInfo;
  }
  FreeRegs -= SizeInRegs;
  return BaseInfo.isDirect() ?
      ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
      ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}
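// Worked examples for the two helpers above (IR types shown for illustration
// only): UseX86_MMXType returns true for <2 x i32>, <4 x i16>, and <8 x i8>
// (64-bit integer vectors whose scalar width is not 64), but false for
// <1 x i64> and for <2 x float>. X86AdjustInlineAsmType rewrites a vector
// operand to x86_mmx only under the MMX "y" register constraint.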

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsMMXDisabled;
  bool IsWin32FloatStructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentTypeWithReg(QualType Ty,
                                         unsigned &FreeRegs) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsMMXDisabled(m), IsWin32FloatStructABI(w),
      DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool m, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }
};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                          unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
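      // For instance (illustrative only): 'struct { double d; }' would be
      // returned directly as a double, and 'struct { char *p; }' directly as
      // the pointer type, rather than as a coerced integer.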
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
  if (!ByVal)
    return ABIArgInfo::getIndirect(0, false);

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
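  // For example (illustrative, not from the original comments): a struct
  // whose most-aligned member is a 16-byte SSE vector has TypeAlign 16; when
  // the byval slot's StackAlign is smaller, the Realign flag asks CodeGen to
  // copy the argument into a suitably aligned temporary.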
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

ABIArgInfo
X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
                                           unsigned &FreeRegs) const {
  // Common case first.
  if (FreeRegs == 0)
    return classifyArgumentType(Ty);

  Class C = classify(Ty);
  if (C == Float)
    return classifyArgumentType(Ty);

  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs == 0)
    return classifyArgumentType(Ty);

  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return classifyArgumentType(Ty);
  }
  assert(SizeInRegs >= 1 && SizeInRegs <= 3);
  FreeRegs -= SizeInRegs;

  // If it is a simple scalar, keep the type so that we produce a cleaner IR.
  ABIArgInfo Foo = classifyArgumentType(Ty);
  if (Foo.isDirect() && !Foo.getDirectOffset() && !Foo.getPaddingType())
    return ABIArgInfo::getDirectInReg(Foo.getCoerceToType());
  if (Foo.isExtend())
    return ABIArgInfo::getExtendInReg(Foo.getCoerceToType());

  llvm::LLVMContext &LLVMContext = getVMContext();
  llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
  SmallVector<llvm::Type*, 3> Elements;
  for (unsigned I = 0; I < SizeInRegs; ++I)
    Elements.push_back(Int32);
  llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
  return ABIArgInfo::getDirectInReg(Result);
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, /*ByVal=*/false);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
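    // Concretely (illustrative): a 32-bit <2 x i16> would be passed as an
    // i32, and a single-element 64-bit vector as an i64; larger vectors fall
    // through to the MMX and direct cases below.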
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    llvm::Type *IRType = CGT.ConvertType(Ty);
    if (UseX86_MMXType(IRType)) {
      if (IsMMXDisabled)
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            64));
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
    DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
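      // (This corresponds to a function declared with
      // __attribute__((force_align_arg_pointer)); the backend then realigns
      // the stack to 16 bytes in the prologue.)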
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttribute(llvm::AttrListPtr::FunctionIndex,
                       llvm::Attributes::get(CGM.getLLVMContext(), B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  //   (a) If one of the classes is Memory, the whole argument is passed in
  //       memory.
  //
  //   (b) If X87UP is not preceded by X87, the whole argument is passed in
  //       memory.
  //
  //   (c) If the size of the aggregate exceeds two eightbytes and the first
  //       eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //       argument is passed in memory.
  //       NOTE: This is necessary to keep the ABI working for processors
  //       that don't support the __m256 type.
  //
  //   (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  //   (a) If both classes are equal, this is the resulting class.
  //
  //   (b) If one of the classes is NO_CLASS, the resulting class is
  //       the other class.
  //
  //   (c) If one of the classes is MEMORY, the result is the MEMORY
  //       class.
  //
  //   (d) If one of the classes is INTEGER, the result is the
  //       INTEGER.
  //
  //   (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //       MEMORY is used as class.
  //
  //   (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getContext().getTargetInfo().getTriple().getOS() ==
                llvm::Triple::NativeClient)) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
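    // Summarizing the builtin cases above with examples: 'int' -> Integer,
    // 'double' -> SSE, '__int128' -> (Integer, Integer), and 'long double'
    // -> (X87, X87Up), except on Native Client, where long double is a
    // 64-bit double and classifies as SSE.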
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256 bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to
      // class SSEUP. The original Lo and Hi design considers that types
      // can't be greater than 128 bits, so a 64-bit split in Hi and Lo
      // makes sense. This design isn't correct for 256 bits, but since
      // there are no cases where the upper parts would need to be
      // inspected, avoid adding complexity and just consider Hi to match
      // the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getContext().getTargetInfo().getTriple().getOS() ==
              llvm::Triple::NativeClient))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't
    // extended to work for sizes wider than 128, early check and fall back
    // to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
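    // A worked example (hypothetical type): for 'struct { double d; int i; }'
    // the field at offset 0 classifies as SSE and the field at offset 64 as
    // Integer, so after merging Lo = SSE and Hi = Integer, i.e. the struct
    // is passed in one XMM register and one GPR.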
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class
      // MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fall back
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register.
  // This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of the
  // free integer registers. Doing this when there are free integer registers
  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}

/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors;
  // strip them off if present.
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16- or 32-byte vector, prefer to pass it.
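  // For example (illustrative): struct V { __m128 v; }; was unwrapped above
  // to <4 x float>, which the check below accepts and returns unchanged.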
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
    llvm::Type *EltTy = VT->getElementType();
    unsigned BitWidth = VT->getBitWidth();
    if ((BitWidth >= 128 && BitWidth <= 256) &&
        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to be either off the end of the specified type or in its
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and to have passed through X86_64ABIInfo::classify with a
/// successful classification that put one of the two halves in the INTEGER
/// class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(i->getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes,
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
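    //
    // Illustrative example: for struct { int x; double y; } the bits in
    // [32, 64) are alignment padding before 'y', so a query over exactly
    // that range finds no user data and this function returns true.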
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}


/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float. We
  // pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}


/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}


/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are only two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

  // AMD64-ABI 3.2.3p4: Rule 4.
  // If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as an 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    NULL);
    break;
  }

  llvm::Type *HighPart = 0;
  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the next available eightbyte chunk of the last used
  // vector register.
  //
  // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo X86_64ABIInfo::classifyArgumentType(
  QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE)
  const
{
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
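  //
  // For example (illustrative), (Lo, Hi) = (Integer, Memory) can never
  // occur: once any eightbyte classifies as MEMORY, the merge rules force
  // the whole object to MEMORY, which is what the first assert checks.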
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }

    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used; the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = 0;
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceded by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register. This only happens when 128-bit vectors are passed.
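  //
  // (Illustrative note: with AVX enabled, a 256-bit vector such as __m256
  // also classifies as SSE followed by SSEUP and takes this path.)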
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
                                    neededSSE);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type, freeIntRegs);
    }
  }
}

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10.
  // Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
    llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the ABI document; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save space.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, so we need to collect the two eightbytes together.
    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
    llvm::Type *DoubleTy = CGF.DoubleTy;
    llvm::Type *DblPtrTy =
      llvm::PointerType::getUnqual(DoubleTy);
    llvm::StructType *ST = llvm::StructType::get(DoubleTy,
                                                 DoubleTy, NULL);
    llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
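  //
  // Illustrative layout of the register save area assumed here: the six
  // GPRs occupy bytes [0, 48) and the eight XMM registers bytes [48, 176),
  // spaced 16 bytes apart, which is why gp_offset advances by 8 and
  // fp_offset by 16 per consumed register.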
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
                                                 "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}

ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {

  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  uint64_t Size = getContext().getTypeSize(Ty);

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
        RT->getDecl()->hasFlexibleArrayMember())
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // FIXME: mingw-w64-gcc emits 128-bit struct as i128
    if (Size == 128 &&
        getContext().getTargetInfo().getTriple().getOS()
          == llvm::Triple::MinGW32)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));

    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
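    //
    // Size is in bits at this point, so the 1/2/4/8-byte rule below becomes
    // "at most 64 bits and a power of two". E.g. (illustrative) a 3-byte
    // struct is 24 bits, fails the (Size & (Size - 1)) == 0 test, and is
    // passed by reference.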
    if (Size <= 64 &&
        (Size & (Size - 1)) == 0)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));

    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  if (Ty->isPromotableIntegerType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect();
}

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  QualType RetTy = FI.getReturnType();
  FI.getReturnInfo() = classify(RetTy);

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classify(it->type);
}

llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                         CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

// PowerPC-32

namespace {
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75: cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}

// PowerPC-64

namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry. This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it) {
      // We rely on the default argument classification for the most part.
      // One exception: an aggregate containing a single floating-point
      // item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(it->type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if (BT && BT->isFloatingPoint()) {
          QualType QT(T, 0);
          it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      it->info = classifyArgumentType(it->type);
    }
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr,
                                 QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

// Based on ARMABIInfo::EmitVAArg, adjusted for the 64-bit machine.
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Handle address alignment for type alignment > 64 bits. Although
  // long double normally requires 16-byte alignment, this is not the
  // case when it is passed as an argument; so handle that special case.
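  //
  // The rounding below is the usual align-up idiom (illustrative):
  //   addr = (addr + align - 1) & ~(align - 1);
  // e.g. for align = 16 this bumps any address to the next 16-byte boundary.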
  const BuiltinType *BT = Ty->getAs<BuiltinType>();
  unsigned TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;

  if (TyAlign > 8 && (!BT || !BT->isFloatingPoint())) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not a power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }

  // Update the va_list pointer.
  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  // If the argument is smaller than 8 bytes, it is right-adjusted in
  // its doubleword slot. Adjust the pointer to pick it up from the
  // correct offset.
  if (SizeInBytes < 8) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
}

static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                              llvm::Value *Address) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75: cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {

  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {

  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}

  bool isEABI() const {
    StringRef Env =
      getContext().getTargetInfo().getTriple().getEnvironmentName();
    return (Env == "gnueabi" || Env == "eabi" ||
            Env == "android" || Env == "androideabi");
  }

private:
  ABIKind getABIKind() const { return Kind; }

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
    : TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }
};

}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);

  // Always honor a user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  // The calling convention the ABI itself uses by default.
  llvm::CallingConv::ID DefaultCC;
  if (isEABI())
    DefaultCC = llvm::CallingConv::ARM_AAPCS;
  else
    DefaultCC = llvm::CallingConv::ARM_APCS;

  // If the user did not ask for a specific calling convention explicitly
  // (e.g. via the pcs attribute), set the effective calling convention if
  // it differs from the ABI default.
  switch (getABIKind()) {
  case APCS:
    if (DefaultCC != llvm::CallingConv::ARM_APCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
    break;
  case AAPCS:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
    break;
  case AAPCS_VFP:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
    break;
  }
}

/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
/// aggregate. If HAMembers is non-null, the number of base elements
/// contained in the type is returned through it; this is used for the
/// recursive calls that check aggregate component types.
static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                   ASTContext &Context,
                                   uint64_t *HAMembers = 0) {
  uint64_t Members = 0;
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
      return false;
    Members *= AT->getSize().getZExtValue();
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      const FieldDecl *FD = *i;
      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Homogeneous aggregates for AAPCS-VFP must have base types of float,
    // double, or 64-bit or 128-bit vectors.
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() != BuiltinType::Float &&
          BT->getKind() != BuiltinType::Double &&
          BT->getKind() != BuiltinType::LongDouble)
        return false;
    } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
      unsigned VecSize = Context.getTypeSize(VT);
      if (VecSize != 64 && VecSize != 128)
        return false;
    } else {
      return false;
    }

    // The base type must be the same for all members. Vector types of the
    // same total size are treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base)
      Base = TyPtr;
    if (Base != TyPtr &&
        (!Base->isVectorType() || !TyPtr->isVectorType() ||
         Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
      return false;
  }

  // Homogeneous Aggregates can have at most 4 members of the base type.
  if (HAMembers)
    *HAMembers = Members;

  return (Members > 0 && Members <= 4);
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded.
    const Type *Base = 0;
    if (isHomogeneousAggregate(Ty, Base, getContext())) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      return ABIArgInfo::getExpand();
    }
  }

  // Support byval for ARM.
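  // E.g. (illustrative) a struct wrapping int[20] is 80 bytes, exceeds the
  // 64-byte limit below, and is therefore passed indirectly with byval.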
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) ||
      getContext().getTypeAlign(Ty) > 64) {
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  llvm::Type *STy =
    llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
  return ABIArgInfo::getDirect(STy);
}

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, so we only need to verify they are
    // "integer like". We still have to disallow a subsequent non-bitfield,
    // for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
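    //
    // E.g. (illustrative, C mode, where an empty struct is a zero-size GNU
    // extension) struct { struct {} e; int x; } places both fields at
    // offset zero, yet gcc still rejects it because x follows another
    // field; the HadField check below mirrors that behavior.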
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                          getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (getABIKind() == AAPCS_VFP) {
    const Type *Base = 0;
    if (isHomogeneousAggregate(RetTy, Base, getContext())) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect();
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // Return in the smallest viable integer type.
3154 if (Size <= 8)
3155 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3156 if (Size <= 16)
3157 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3158 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3159 }
3160
3161 return ABIArgInfo::getIndirect(0);
3162 }
3163
3164 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3165 CodeGenFunction &CGF) const {
3166 llvm::Type *BP = CGF.Int8PtrTy;
3167 llvm::Type *BPP = CGF.Int8PtrPtrTy;
3168
3169 CGBuilderTy &Builder = CGF.Builder;
3170 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3171 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3172 // Handle address alignment for types with alignment greater than 32 bits.
3173 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
3174 if (TyAlign > 4) {
3175 assert((TyAlign & (TyAlign - 1)) == 0 &&
3176 "Alignment is not power of 2!");
3177 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
3178 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
3179 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
3180 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
3181 }
3182 llvm::Type *PTy =
3183 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3184 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3185
3186 uint64_t Offset =
3187 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
3188 llvm::Value *NextAddr =
3189 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3190 "ap.next");
3191 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3192
3193 return AddrTyped;
3194 }
3195
3196 //===----------------------------------------------------------------------===//
3197 // NVPTX ABI Implementation
3198 //===----------------------------------------------------------------------===//
3199
3200 namespace {
3201
3202 class NVPTXABIInfo : public ABIInfo {
3203 public:
3204 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3205
3206 ABIArgInfo classifyReturnType(QualType RetTy) const;
3207 ABIArgInfo classifyArgumentType(QualType Ty) const;
3208
3209 virtual void computeInfo(CGFunctionInfo &FI) const;
3210 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3211 CodeGenFunction &CGF) const;
3212 };
3213
3214 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
3215 public:
3216 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
3217 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
3218
3219 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3220 CodeGen::CodeGenModule &M) const;
3221 };
3222
3223 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
3224 if (RetTy->isVoidType())
3225 return ABIArgInfo::getIgnore();
3226 if (isAggregateTypeForABI(RetTy))
3227 return ABIArgInfo::getIndirect(0);
3228 return ABIArgInfo::getDirect();
3229 }
3230
3231 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
3232 if (isAggregateTypeForABI(Ty))
3233 return ABIArgInfo::getIndirect(0);
3234
3235 return ABIArgInfo::getDirect();
3236 }
3237
3238 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
3239 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3240 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3241 it != ie; ++it)
3242 it->info = classifyArgumentType(it->type);
3243
3244 // Always honor a user-specified calling convention.
3245 if (FI.getCallingConvention() != llvm::CallingConv::C)
3246 return;
3247
3248 // Otherwise, pick the default calling convention dictated by the ABI.
3249 // We're still using the PTX_Kernel/PTX_Device calling conventions here,
3250 // but we should switch to NVVM metadata later on.
3251 llvm::CallingConv::ID DefaultCC;
3252 const LangOptions &LangOpts = getContext().getLangOpts();
3253 if (LangOpts.OpenCL || LangOpts.CUDA) {
3254 // If we are in OpenCL or CUDA mode, then default to device functions.
3255 DefaultCC = llvm::CallingConv::PTX_Device;
3256 } else {
3257 // If we are in standard C/C++ mode, use the triple to decide on the default.
3258 StringRef Env =
3259 getContext().getTargetInfo().getTriple().getEnvironmentName();
3260 if (Env == "device")
3261 DefaultCC = llvm::CallingConv::PTX_Device;
3262 else
3263 DefaultCC = llvm::CallingConv::PTX_Kernel;
3264 }
3265 FI.setEffectiveCallingConvention(DefaultCC);
3266
3267 }
3268
3269 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3270 CodeGenFunction &CGF) const {
3271 llvm_unreachable("NVPTX does not support varargs");
3272 }
3273
3274 void NVPTXTargetCodeGenInfo::
3275 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3276 CodeGen::CodeGenModule &M) const {
3277 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3278 if (!FD) return;
3279
3280 llvm::Function *F = cast<llvm::Function>(GV);
3281
3282 // Perform special handling in OpenCL mode.
3283 if (M.getLangOpts().OpenCL) {
3284 // Use OpenCL function attributes to set proper calling conventions.
3285 // By default, all functions are device functions.
3286 if (FD->hasAttr<OpenCLKernelAttr>()) {
3287 // OpenCL __kernel functions get a kernel calling convention.
3288 F->setCallingConv(llvm::CallingConv::PTX_Kernel);
3289 // And kernel functions are not subject to inlining.
3290 F->addFnAttr(llvm::Attributes::NoInline);
3291 }
3292 }
3293
3294 // Perform special handling in CUDA mode.
3295 if (M.getLangOpts().CUDA) {
3296 // CUDA __global__ functions get a kernel calling convention. Since
3297 // __global__ functions cannot be called from the device, we do not
3298 // need to set the noinline attribute.
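    // Illustrative lowering (a sketch, not from the original source): a kernel
    //   __global__ void k(float *p);
    // would be emitted with the kernel convention, roughly
    //   define ptx_kernel void @k(float* %p)
    // (the name @k is hypothetical).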
3299 if (FD->getAttr<CUDAGlobalAttr>()) 3300 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3301 } 3302} 3303 3304} 3305 3306//===----------------------------------------------------------------------===// 3307// MBlaze ABI Implementation 3308//===----------------------------------------------------------------------===// 3309 3310namespace { 3311 3312class MBlazeABIInfo : public ABIInfo { 3313public: 3314 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3315 3316 bool isPromotableIntegerType(QualType Ty) const; 3317 3318 ABIArgInfo classifyReturnType(QualType RetTy) const; 3319 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3320 3321 virtual void computeInfo(CGFunctionInfo &FI) const { 3322 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3323 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3324 it != ie; ++it) 3325 it->info = classifyArgumentType(it->type); 3326 } 3327 3328 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3329 CodeGenFunction &CGF) const; 3330}; 3331 3332class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 3333public: 3334 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 3335 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 3336 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3337 CodeGen::CodeGenModule &M) const; 3338}; 3339 3340} 3341 3342bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 3343 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 3344 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3345 switch (BT->getKind()) { 3346 case BuiltinType::Bool: 3347 case BuiltinType::Char_S: 3348 case BuiltinType::Char_U: 3349 case BuiltinType::SChar: 3350 case BuiltinType::UChar: 3351 case BuiltinType::Short: 3352 case BuiltinType::UShort: 3353 return true; 3354 default: 3355 return false; 3356 } 3357 return false; 3358} 3359 3360llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3361 CodeGenFunction &CGF) const { 3362 // FIXME: Implement 3363 return 0; 3364} 3365 3366 3367ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 3368 if (RetTy->isVoidType()) 3369 return ABIArgInfo::getIgnore(); 3370 if (isAggregateTypeForABI(RetTy)) 3371 return ABIArgInfo::getIndirect(0); 3372 3373 return (isPromotableIntegerType(RetTy) ? 3374 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3375} 3376 3377ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 3378 if (isAggregateTypeForABI(Ty)) 3379 return ABIArgInfo::getIndirect(0); 3380 3381 return (isPromotableIntegerType(Ty) ? 3382 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3383} 3384 3385void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3386 llvm::GlobalValue *GV, 3387 CodeGen::CodeGenModule &M) 3388 const { 3389 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3390 if (!FD) return; 3391 3392 llvm::CallingConv::ID CC = llvm::CallingConv::C; 3393 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 3394 CC = llvm::CallingConv::MBLAZE_INTR; 3395 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 3396 CC = llvm::CallingConv::MBLAZE_SVOL; 3397 3398 if (CC != llvm::CallingConv::C) { 3399 // Handle 'interrupt_handler' attribute: 3400 llvm::Function *F = cast<llvm::Function>(GV); 3401 3402 // Step 1: Set ISR calling convention. 3403 F->setCallingConv(CC); 3404 3405 // Step 2: Add attributes goodness. 3406 F->addFnAttr(llvm::Attributes::NoInline); 3407 } 3408 3409 // Step 3: Emit _interrupt_handler alias. 
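  // Illustrative (a sketch, not from the original source): for a handler
  //   void isr(void) __attribute__((interrupt_handler));
  // this emits roughly
  //   @_interrupt_handler = alias void ()* @isr
  // so startup code can locate the ISR under a fixed name (the name 'isr'
  // is hypothetical).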
3410 if (CC == llvm::CallingConv::MBLAZE_INTR) 3411 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3412 "_interrupt_handler", GV, &M.getModule()); 3413} 3414 3415 3416//===----------------------------------------------------------------------===// 3417// MSP430 ABI Implementation 3418//===----------------------------------------------------------------------===// 3419 3420namespace { 3421 3422class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3423public: 3424 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3425 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3426 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3427 CodeGen::CodeGenModule &M) const; 3428}; 3429 3430} 3431 3432void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3433 llvm::GlobalValue *GV, 3434 CodeGen::CodeGenModule &M) const { 3435 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3436 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3437 // Handle 'interrupt' attribute: 3438 llvm::Function *F = cast<llvm::Function>(GV); 3439 3440 // Step 1: Set ISR calling convention. 3441 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3442 3443 // Step 2: Add attributes goodness. 3444 F->addFnAttr(llvm::Attributes::NoInline); 3445 3446 // Step 3: Emit ISR vector alias. 3447 unsigned Num = attr->getNumber() + 0xffe0; 3448 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3449 "vector_" + Twine::utohexstr(Num), 3450 GV, &M.getModule()); 3451 } 3452 } 3453} 3454 3455//===----------------------------------------------------------------------===// 3456// MIPS ABI Implementation. This works for both little-endian and 3457// big-endian variants. 3458//===----------------------------------------------------------------------===// 3459 3460namespace { 3461class MipsABIInfo : public ABIInfo { 3462 bool IsO32; 3463 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 3464 void CoerceToIntArgs(uint64_t TySize, 3465 SmallVector<llvm::Type*, 8> &ArgList) const; 3466 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 3467 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 3468 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 3469public: 3470 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3471 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 3472 StackAlignInBytes(IsO32 ? 8 : 16) {} 3473 3474 ABIArgInfo classifyReturnType(QualType RetTy) const; 3475 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 3476 virtual void computeInfo(CGFunctionInfo &FI) const; 3477 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3478 CodeGenFunction &CGF) const; 3479}; 3480 3481class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3482 unsigned SizeOfUnwindException; 3483public: 3484 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3485 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3486 SizeOfUnwindException(IsO32 ? 
24 : 32) {}
3487
3488 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
3489 return 29;
3490 }
3491
3492 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3493 llvm::Value *Address) const;
3494
3495 unsigned getSizeOfUnwindException() const {
3496 return SizeOfUnwindException;
3497 }
3498 };
3499 }
3500
3501 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
3502 SmallVector<llvm::Type*, 8> &ArgList) const {
3503 llvm::IntegerType *IntTy =
3504 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
3505
3506 // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy; TySize is in bits.
3507 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
3508 ArgList.push_back(IntTy);
3509
3510 // If necessary, add one more integer type to ArgList.
3511 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
3512
3513 if (R)
3514 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
3515 }
3516
3517 // In N32/64, an aligned double-precision floating-point field is passed in
3518 // a register.
3519 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
3520 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
3521
3522 if (IsO32) {
3523 CoerceToIntArgs(TySize, ArgList);
3524 return llvm::StructType::get(getVMContext(), ArgList);
3525 }
3526
3527 if (Ty->isComplexType())
3528 return CGT.ConvertType(Ty);
3529
3530 const RecordType *RT = Ty->getAs<RecordType>();
3531
3532 // Unions/vectors are passed in integer registers.
3533 if (!RT || !RT->isStructureOrClassType()) {
3534 CoerceToIntArgs(TySize, ArgList);
3535 return llvm::StructType::get(getVMContext(), ArgList);
3536 }
3537
3538 const RecordDecl *RD = RT->getDecl();
3539 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3540 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
3541
3542 uint64_t LastOffset = 0;
3543 unsigned idx = 0;
3544 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
3545
3546 // Iterate over fields in the struct/class and check if there are any aligned
3547 // double fields.
3548 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3549 i != e; ++i, ++idx) {
3550 const QualType Ty = i->getType();
3551 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3552
3553 if (!BT || BT->getKind() != BuiltinType::Double)
3554 continue;
3555
3556 uint64_t Offset = Layout.getFieldOffset(idx);
3557 if (Offset % 64) // Ignore doubles that are not aligned.
3558 continue;
3559
3560 // Add ((Offset - LastOffset) / 64) args of type i64.
3561 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
3562 ArgList.push_back(I64);
3563
3564 // Add the double type.
3565 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3566 LastOffset = Offset + 64; 3567 } 3568 3569 CoerceToIntArgs(TySize - LastOffset, IntArgList); 3570 ArgList.append(IntArgList.begin(), IntArgList.end()); 3571 3572 return llvm::StructType::get(getVMContext(), ArgList); 3573} 3574 3575llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const { 3576 assert((Offset % MinABIStackAlignInBytes) == 0); 3577 3578 if ((Align - 1) & Offset) 3579 return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3580 3581 return 0; 3582} 3583 3584ABIArgInfo 3585MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 3586 uint64_t OrigOffset = Offset; 3587 uint64_t TySize = getContext().getTypeSize(Ty); 3588 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 3589 3590 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 3591 (uint64_t)StackAlignInBytes); 3592 Offset = llvm::RoundUpToAlignment(Offset, Align); 3593 Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 3594 3595 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 3596 // Ignore empty aggregates. 3597 if (TySize == 0) 3598 return ABIArgInfo::getIgnore(); 3599 3600 // Records with non trivial destructors/constructors should not be passed 3601 // by value. 3602 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) { 3603 Offset = OrigOffset + MinABIStackAlignInBytes; 3604 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3605 } 3606 3607 // If we have reached here, aggregates are passed directly by coercing to 3608 // another structure type. Padding is inserted if the offset of the 3609 // aggregate is unaligned. 3610 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 3611 getPaddingType(Align, OrigOffset)); 3612 } 3613 3614 // Treat an enum type as its underlying type. 3615 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3616 Ty = EnumTy->getDecl()->getIntegerType(); 3617 3618 if (Ty->isPromotableIntegerType()) 3619 return ABIArgInfo::getExtend(); 3620 3621 return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset)); 3622} 3623 3624llvm::Type* 3625MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 3626 const RecordType *RT = RetTy->getAs<RecordType>(); 3627 SmallVector<llvm::Type*, 8> RTList; 3628 3629 if (RT && RT->isStructureOrClassType()) { 3630 const RecordDecl *RD = RT->getDecl(); 3631 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3632 unsigned FieldCnt = Layout.getFieldCount(); 3633 3634 // N32/64 returns struct/classes in floating point registers if the 3635 // following conditions are met: 3636 // 1. The size of the struct/class is no larger than 128-bit. 3637 // 2. The struct/class has one or two fields all of which are floating 3638 // point types. 3639 // 3. The offset of the first field is zero (this follows what gcc does). 3640 // 3641 // Any other composite results are returned in integer registers. 
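  // Illustrative (not from the original source):
  //   struct { float f; double d; };  // two FP fields: returned as
  //                                   // { float, double } in FP registers
  //   struct { double d; int i; };    // mixed fields: integer registers
  //   struct { float a, b, c; };      // three fields: integer registers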
3642 // 3643 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3644 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3645 for (; b != e; ++b) { 3646 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 3647 3648 if (!BT || !BT->isFloatingPoint()) 3649 break; 3650 3651 RTList.push_back(CGT.ConvertType(b->getType())); 3652 } 3653 3654 if (b == e) 3655 return llvm::StructType::get(getVMContext(), RTList, 3656 RD->hasAttr<PackedAttr>()); 3657 3658 RTList.clear(); 3659 } 3660 } 3661 3662 CoerceToIntArgs(Size, RTList); 3663 return llvm::StructType::get(getVMContext(), RTList); 3664} 3665 3666ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3667 uint64_t Size = getContext().getTypeSize(RetTy); 3668 3669 if (RetTy->isVoidType() || Size == 0) 3670 return ABIArgInfo::getIgnore(); 3671 3672 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 3673 if (Size <= 128) { 3674 if (RetTy->isAnyComplexType()) 3675 return ABIArgInfo::getDirect(); 3676 3677 // O32 returns integer vectors in registers. 3678 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 3679 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3680 3681 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3682 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3683 } 3684 3685 return ABIArgInfo::getIndirect(0); 3686 } 3687 3688 // Treat an enum type as its underlying type. 3689 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3690 RetTy = EnumTy->getDecl()->getIntegerType(); 3691 3692 return (RetTy->isPromotableIntegerType() ? 3693 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3694} 3695 3696void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3697 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3698 RetInfo = classifyReturnType(FI.getReturnType()); 3699 3700 // Check if a pointer to an aggregate is passed as a hidden argument. 3701 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 3702 3703 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3704 it != ie; ++it) 3705 it->info = classifyArgumentType(it->type, Offset); 3706} 3707 3708llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3709 CodeGenFunction &CGF) const { 3710 llvm::Type *BP = CGF.Int8PtrTy; 3711 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3712 3713 CGBuilderTy &Builder = CGF.Builder; 3714 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3715 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3716 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8; 3717 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3718 llvm::Value *AddrTyped; 3719 unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0); 3720 llvm::IntegerType *IntTy = (PtrWidth == 32) ? 
CGF.Int32Ty : CGF.Int64Ty;
3721
3722 if (TypeAlign > MinABIStackAlignInBytes) {
3723 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
3724 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
3725 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
3726 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
3727 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
3728 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
3729 }
3730 else
3731 AddrTyped = Builder.CreateBitCast(Addr, PTy);
3732
3733 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
3734 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
3735 uint64_t Offset =
3736 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
3737 llvm::Value *NextAddr =
3738 Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
3739 "ap.next");
3740 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3741
3742 return AddrTyped;
3743 }
3744
3745 bool
3746 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3747 llvm::Value *Address) const {
3748 // This information comes from gcc's implementation, which seems to
3749 // be as canonical as it gets.
3750
3751 // Everything on MIPS is 4 bytes. Double-precision FP registers
3752 // are aliased to pairs of single-precision FP registers.
3753 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3754
3755 // 0-31 are the general purpose registers, $0 - $31.
3756 // 32-63 are the floating-point registers, $f0 - $f31.
3757 // 64 and 65 are the multiply/divide registers, $hi and $lo.
3758 // 66 is the (notional, I think) register for signal-handler return.
3759 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
3760
3761 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
3762 // They are one bit wide and ignored here.
3763
3764 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
3765 // (coprocessor 1 is the FP unit)
3766 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
3767 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
3768 // 176-181 are the DSP accumulator registers.
3769 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
3770 return false;
3771 }
3772
3773 //===----------------------------------------------------------------------===//
3774 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
3775 // Currently subclassed only to implement custom OpenCL C function attribute
3776 // handling.
3777 //===----------------------------------------------------------------------===//
3778
3779 namespace {
3780
3781 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3782 public:
3783 TCETargetCodeGenInfo(CodeGenTypes &CGT)
3784 : DefaultTargetCodeGenInfo(CGT) {}
3785
3786 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3787 CodeGen::CodeGenModule &M) const;
3788 };
3789
3790 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
3791 llvm::GlobalValue *GV,
3792 CodeGen::CodeGenModule &M) const {
3793 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3794 if (!FD) return;
3795
3796 llvm::Function *F = cast<llvm::Function>(GV);
3797
3798 if (M.getLangOpts().OpenCL) {
3799 if (FD->hasAttr<OpenCLKernelAttr>()) {
3800 // OpenCL C kernel functions are not subject to inlining.
3801 F->addFnAttr(llvm::Attributes::NoInline);
3802
3803 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
3804
3805 // Convert the reqd_work_group_size() attributes to metadata.
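        // Illustrative result (a sketch, not from the original source): for
        //   __kernel __attribute__((reqd_work_group_size(8, 8, 1))) void f();
        // the named metadata ends up roughly as
        //   !opencl.kernel_wg_size_info = !{!0}
        //   !0 = metadata !{void ()* @f, i32 8, i32 8, i32 1, i1 true}
        // (the kernel name @f is hypothetical).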
3806 llvm::LLVMContext &Context = F->getContext(); 3807 llvm::NamedMDNode *OpenCLMetadata = 3808 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 3809 3810 SmallVector<llvm::Value*, 5> Operands; 3811 Operands.push_back(F); 3812 3813 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3814 llvm::APInt(32, 3815 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim()))); 3816 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3817 llvm::APInt(32, 3818 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim()))); 3819 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3820 llvm::APInt(32, 3821 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim()))); 3822 3823 // Add a boolean constant operand for "required" (true) or "hint" (false) 3824 // for implementing the work_group_size_hint attr later. Currently 3825 // always true as the hint is not yet implemented. 3826 Operands.push_back(llvm::ConstantInt::getTrue(Context)); 3827 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 3828 } 3829 } 3830 } 3831} 3832 3833} 3834 3835//===----------------------------------------------------------------------===// 3836// Hexagon ABI Implementation 3837//===----------------------------------------------------------------------===// 3838 3839namespace { 3840 3841class HexagonABIInfo : public ABIInfo { 3842 3843 3844public: 3845 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3846 3847private: 3848 3849 ABIArgInfo classifyReturnType(QualType RetTy) const; 3850 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3851 3852 virtual void computeInfo(CGFunctionInfo &FI) const; 3853 3854 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3855 CodeGenFunction &CGF) const; 3856}; 3857 3858class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 3859public: 3860 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 3861 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 3862 3863 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 3864 return 29; 3865 } 3866}; 3867 3868} 3869 3870void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 3871 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3872 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3873 it != ie; ++it) 3874 it->info = classifyArgumentType(it->type); 3875} 3876 3877ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 3878 if (!isAggregateTypeForABI(Ty)) { 3879 // Treat an enum type as its underlying type. 3880 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3881 Ty = EnumTy->getDecl()->getIntegerType(); 3882 3883 return (Ty->isPromotableIntegerType() ? 3884 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3885 } 3886 3887 // Ignore empty records. 3888 if (isEmptyRecord(getContext(), Ty, true)) 3889 return ABIArgInfo::getIgnore(); 3890 3891 // Structures with either a non-trivial destructor or a non-trivial 3892 // copy constructor are always indirect. 3893 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3894 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3895 3896 uint64_t Size = getContext().getTypeSize(Ty); 3897 if (Size > 64) 3898 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3899 // Pass in the smallest viable integer type. 
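  // Illustrative (not from the original source): a 6-byte struct is widened
  // to an i64, a 3-byte struct to an i32, and a lone char to an i8.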
3900 else if (Size > 32) 3901 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 3902 else if (Size > 16) 3903 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3904 else if (Size > 8) 3905 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3906 else 3907 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3908} 3909 3910ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 3911 if (RetTy->isVoidType()) 3912 return ABIArgInfo::getIgnore(); 3913 3914 // Large vector types should be returned via memory. 3915 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 3916 return ABIArgInfo::getIndirect(0); 3917 3918 if (!isAggregateTypeForABI(RetTy)) { 3919 // Treat an enum type as its underlying type. 3920 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3921 RetTy = EnumTy->getDecl()->getIntegerType(); 3922 3923 return (RetTy->isPromotableIntegerType() ? 3924 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3925 } 3926 3927 // Structures with either a non-trivial destructor or a non-trivial 3928 // copy constructor are always indirect. 3929 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3930 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3931 3932 if (isEmptyRecord(getContext(), RetTy, true)) 3933 return ABIArgInfo::getIgnore(); 3934 3935 // Aggregates <= 8 bytes are returned in r0; other aggregates 3936 // are returned indirectly. 3937 uint64_t Size = getContext().getTypeSize(RetTy); 3938 if (Size <= 64) { 3939 // Return in the smallest viable integer type. 3940 if (Size <= 8) 3941 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3942 if (Size <= 16) 3943 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3944 if (Size <= 32) 3945 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3946 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 3947 } 3948 3949 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3950} 3951 3952llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3953 CodeGenFunction &CGF) const { 3954 // FIXME: Need to handle alignment 3955 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3956 3957 CGBuilderTy &Builder = CGF.Builder; 3958 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 3959 "ap"); 3960 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3961 llvm::Type *PTy = 3962 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3963 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3964 3965 uint64_t Offset = 3966 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 3967 llvm::Value *NextAddr = 3968 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3969 "ap.next"); 3970 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3971 3972 return AddrTyped; 3973} 3974 3975 3976const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 3977 if (TheTargetCodeGenInfo) 3978 return *TheTargetCodeGenInfo; 3979 3980 const llvm::Triple &Triple = getContext().getTargetInfo().getTriple(); 3981 switch (Triple.getArch()) { 3982 default: 3983 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 3984 3985 case llvm::Triple::le32: 3986 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 3987 case llvm::Triple::mips: 3988 case llvm::Triple::mipsel: 3989 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 3990 3991 case llvm::Triple::mips64: 3992 case 
llvm::Triple::mips64el: 3993 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 3994 3995 case llvm::Triple::arm: 3996 case llvm::Triple::thumb: 3997 { 3998 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 3999 4000 if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0) 4001 Kind = ARMABIInfo::APCS; 4002 else if (CodeGenOpts.FloatABI == "hard") 4003 Kind = ARMABIInfo::AAPCS_VFP; 4004 4005 return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind)); 4006 } 4007 4008 case llvm::Triple::ppc: 4009 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 4010 case llvm::Triple::ppc64: 4011 if (Triple.isOSBinFormatELF()) 4012 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types)); 4013 else 4014 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 4015 4016 case llvm::Triple::nvptx: 4017 case llvm::Triple::nvptx64: 4018 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 4019 4020 case llvm::Triple::mblaze: 4021 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 4022 4023 case llvm::Triple::msp430: 4024 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 4025 4026 case llvm::Triple::tce: 4027 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 4028 4029 case llvm::Triple::x86: { 4030 bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0; 4031 4032 if (Triple.isOSDarwin()) 4033 return *(TheTargetCodeGenInfo = 4034 new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false, 4035 CodeGenOpts.NumRegisterParameters)); 4036 4037 switch (Triple.getOS()) { 4038 case llvm::Triple::Cygwin: 4039 case llvm::Triple::MinGW32: 4040 case llvm::Triple::AuroraUX: 4041 case llvm::Triple::DragonFly: 4042 case llvm::Triple::FreeBSD: 4043 case llvm::Triple::OpenBSD: 4044 case llvm::Triple::Bitrig: 4045 return *(TheTargetCodeGenInfo = 4046 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, 4047 false, 4048 CodeGenOpts.NumRegisterParameters)); 4049 4050 case llvm::Triple::Win32: 4051 return *(TheTargetCodeGenInfo = 4052 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true, 4053 CodeGenOpts.NumRegisterParameters)); 4054 4055 default: 4056 return *(TheTargetCodeGenInfo = 4057 new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX, 4058 false, 4059 CodeGenOpts.NumRegisterParameters)); 4060 } 4061 } 4062 4063 case llvm::Triple::x86_64: { 4064 bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0; 4065 4066 switch (Triple.getOS()) { 4067 case llvm::Triple::Win32: 4068 case llvm::Triple::MinGW32: 4069 case llvm::Triple::Cygwin: 4070 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 4071 default: 4072 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, 4073 HasAVX)); 4074 } 4075 } 4076 case llvm::Triple::hexagon: 4077 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 4078 } 4079} 4080
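// Illustrative (not from the original source): for the triple
// "armv7-unknown-linux-gnueabi" built with -mfloat-abi=hard (so
// CodeGenOpts.FloatABI == "hard"), the switch above selects
// ARMTargetCodeGenInfo with ARMABIInfo::AAPCS_VFP.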