TargetInfo.cpp revision 5f14fcbd45870585a136ae735d29d0e085c0d7f8
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64   FreeBSD, Linux, Darwin
  //   x86-32   FreeBSD, Linux, Darwin
  //   PowerPC  Linux, Darwin
  //   ARM      Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}
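
// For illustration, a minimal sketch of how the predicate above behaves on
// some hypothetical C types (examples only, not code used in this file):
//
//   struct S1 { float f; };           // single-element struct (float)
//   struct S2 { struct S1 s; };       // single-element struct (float)
//   struct S3 { float f[1]; };        // single-element struct (float)
//   struct S4 { float f; float g; };  // not single-element (two fields)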

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
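
// For illustration (hypothetical C structs, not part of this file), under the
// rule above:
//
//   struct Pt  { int x; int y; };   // all fields 32-bit, no holes:
//                                   //   expandable into (i32, i32)
//   struct Mix { char c; int x; };  // 'char' is not a 32/64-bit basic type
//                                   //   (and its padding would be a hole):
//                                   //   not expandable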

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
                                              unsigned &FreeRegs) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    FreeRegs = 0;
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
                         ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  // Regparm regs hold 32 bits.
  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs == 0) return BaseInfo;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return BaseInfo;
  }
  FreeRegs -= SizeInRegs;
  return BaseInfo.isDirect() ?
      ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
      ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
}
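
// Worked example for the register accounting above (hypothetical, assuming a
// function attributed with regparm(2), i.e. FreeRegs starts at 2): a 64-bit
// integer argument needs SizeInRegs == 2, consumes both registers, and is
// marked DirectInReg; a following 32-bit argument then finds FreeRegs == 0
// and stays a plain Direct value.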

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}
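
// Illustration of the predicate above: 64-bit integer vectors whose element
// type is narrower than 64 bits map to x86_mmx, while <1 x i64> (scalar size
// 64) and floating-point vectors keep their LLVM type.
//
//   <8 x i8>, <4 x i16>, <2 x i32>  -> x86_mmx
//   <1 x i64>, <2 x float>          -> unchanged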

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsMMXDisabled;
  bool IsWin32FloatStructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
                               unsigned &FreeRegs) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;
  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsMMXDisabled(m), IsWin32FloatStructABI(w),
      DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool m, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                            unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
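
// For illustration, a few hypothetical return types under the rules above
// (with IsSmallStructInRegABI set and the C calling convention):
//
//   struct F { float f; };          -> Direct(float): returned in an FP
//                                      register (except on Win32)
//   struct P { char a, b, c, d; };  -> Direct(i32): a register-sized integer
//   struct B { char a, b, c; };     -> Indirect: 24 bits is not register
//                                      sized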

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first: if any base contains an
  // SSE vector type, so does the record.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            unsigned &FreeRegs) const {
  if (!ByVal) {
    if (FreeRegs) {
      --FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs) const {
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return false;
  }

  FreeRegs -= SizeInRegs;
  return true;
}
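
// Worked example for the accounting above (hypothetical, assuming
// -mregparm=3 so FreeRegs starts at 3): an 'int' takes one register
// (SizeInRegs == 1), a 64-bit 'long long' takes two, and a 'double' is
// classified Float and is never placed in an integer register, leaving
// FreeRegs untouched.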

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &FreeRegs) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, /*ByVal=*/false, FreeRegs);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, /*ByVal=*/true, FreeRegs);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    if (shouldUseInReg(Ty, FreeRegs)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      llvm::LLVMContext &LLVMContext = getVMContext();
      llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type*, 3> Elements;
      for (unsigned I = 0; I < SizeInRegs; ++I)
        Elements.push_back(Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty, /*ByVal=*/true, FreeRegs);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    llvm::Type *IRType = CGT.ConvertType(Ty);
    if (UseX86_MMXType(IRType)) {
      if (IsMMXDisabled)
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            64));
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, FreeRegs);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
    DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute whether the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
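
// Worked example for the rounding above: for a type with Align == 8 and an
// incoming ap.cur of 0x1004, the emitted IR computes
// (0x1004 + 7) & -8 == 0x1008, then advances ap by the type size rounded up
// to a multiple of 8. For types with Align <= 4 the address is used as-is.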

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttribute(llvm::AttrListPtr::FunctionIndex,
                       llvm::Attributes::get(CGM.getLLVMContext(), B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //     the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //     class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  //     INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //     MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
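
// Worked examples for the rules above (hypothetical C types):
//
//   struct { int i; float f; }        one eightbyte; merge(INTEGER, SSE)
//                                     yields INTEGER by rule (d), so the
//                                     struct travels in a GPR.
//   union { long double ld; int i; }  Lo merges to INTEGER (rule (d)) but Hi
//                                     stays X87UP, so postMerge clause (b)
//                                     forces the whole argument into memory
//                                     on platforms honoring revision 0.98.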

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getContext().getTargetInfo().getTriple().getOS() ==
                llvm::Triple::NativeClient)) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong)||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to
      // class SSEUP. The original Lo and Hi design considers that types
      // can't be greater than 128-bits, so a 64-bit split in Hi and Lo
      // makes sense. This design isn't correct for 256-bits, but since
      // there are no cases where the upper parts would need to be
      // inspected, avoid adding complexity and just consider Hi to match
      // the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getContext().getTargetInfo().getTriple().getOS() ==
              llvm::Triple::NativeClient))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't
    // extended to work for sizes wider than 128, early check and fall back
    // to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class
      // MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fall back
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of
  // the free integer registers. Doing this when there are free integer
  // registers would require more care, as we would have to ensure that the
  // coerced value did not claim the unused register. That would require
  // either reordering the arguments to the function (so that any subsequent
  // inreg values came first), or only doing this optimization when there were
  // no following arguments that might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being
  // passed by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}
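
// Worked example for the coercion above: with freeIntRegs == 0, a
// hypothetical 'struct { int a, b; }' (Size == 64, byval alignment raised to
// 8) is coerced to a plain i64 and laid out on the stack directly, avoiding
// byval; a 16-byte struct instead stays Indirect with explicit alignment.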
  // This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of
  // the free integer registers. Doing this when there are free integer
  // registers would require more care, as we would have to ensure that the
  // coerced value did not claim the unused register. That would require either
  // reordering the arguments to the function (so that any subsequent inreg
  // values came first), or only doing this optimization when there were no
  // following arguments that might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}

/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors,
  // strip them off if present.
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16-byte vector, prefer to pass it.
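  // For example (illustrative, not from the original source): given a
  // wrapper such as
  //   struct W { __m128 v; };
  // the struct is stripped above and the value is passed directly as
  // <4 x float>.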
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
    llvm::Type *EltTy = VT->getElementType();
    unsigned BitWidth = VT->getBitWidth();
    if ((BitWidth >= 128 && BitWidth <= 256) &&
        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or be in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(i->getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
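    //
    // For example (illustrative, not from the original source): querying
    // bits [96, 128) of
    //   struct { double d; int i; };   // 16 bytes; bytes 12-15 are padding
    // overlaps no field data, so the walk below returns true and the caller
    // may treat those bits as padding.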
1789 unsigned idx = 0; 1790 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1791 i != e; ++i, ++idx) { 1792 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1793 1794 // If we found a field after the region we care about, then we're done. 1795 if (FieldOffset >= EndBit) break; 1796 1797 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 1798 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1799 Context)) 1800 return false; 1801 } 1802 1803 // If nothing in this record overlapped the area of interest, then we're 1804 // clean. 1805 return true; 1806 } 1807 1808 return false; 1809} 1810 1811/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1812/// float member at the specified offset. For example, {int,{float}} has a 1813/// float at offset 4. It is conservatively correct for this routine to return 1814/// false. 1815static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1816 const llvm::DataLayout &TD) { 1817 // Base case if we find a float. 1818 if (IROffset == 0 && IRType->isFloatTy()) 1819 return true; 1820 1821 // If this is a struct, recurse into the field at the specified offset. 1822 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1823 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1824 unsigned Elt = SL->getElementContainingOffset(IROffset); 1825 IROffset -= SL->getElementOffset(Elt); 1826 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1827 } 1828 1829 // If this is an array, recurse into the field at the specified offset. 1830 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1831 llvm::Type *EltTy = ATy->getElementType(); 1832 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1833 IROffset -= IROffset/EltSize*EltSize; 1834 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1835 } 1836 1837 return false; 1838} 1839 1840 1841/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1842/// low 8 bytes of an XMM register, corresponding to the SSE class. 1843llvm::Type *X86_64ABIInfo:: 1844GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1845 QualType SourceTy, unsigned SourceOffset) const { 1846 // The only three choices we have are either double, <2 x float>, or float. We 1847 // pass as float if the last 4 bytes is just padding. This happens for 1848 // structs that contain 3 floats. 1849 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1850 SourceOffset*8+64, getContext())) 1851 return llvm::Type::getFloatTy(getVMContext()); 1852 1853 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1854 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1855 // case. 1856 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 1857 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 1858 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1859 1860 return llvm::Type::getDoubleTy(getVMContext()); 1861} 1862 1863 1864/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1865/// an 8-byte GPR. This means that we either have a scalar or we are talking 1866/// about the high or low part of an up-to-16-byte struct. This routine picks 1867/// the best LLVM IR type to represent this, which may be i64 or may be anything 1868/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1869/// etc). 
1870/// 1871/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1872/// the source type. IROffset is an offset in bytes into the LLVM IR type that 1873/// the 8-byte value references. PrefType may be null. 1874/// 1875/// SourceTy is the source level type for the entire argument. SourceOffset is 1876/// an offset into this that we're processing (which is always either 0 or 8). 1877/// 1878llvm::Type *X86_64ABIInfo:: 1879GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1880 QualType SourceTy, unsigned SourceOffset) const { 1881 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1882 // returning an 8-byte unit starting with it. See if we can safely use it. 1883 if (IROffset == 0) { 1884 // Pointers and int64's always fill the 8-byte unit. 1885 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 1886 IRType->isIntegerTy(64)) 1887 return IRType; 1888 1889 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1890 // goodness in the source type is just tail padding. This is allowed to 1891 // kick in for struct {double,int} on the int, but not on 1892 // struct{double,int,int} because we wouldn't return the second int. We 1893 // have to do this analysis on the source type because we can't depend on 1894 // unions being lowered a specific way etc. 1895 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1896 IRType->isIntegerTy(32) || 1897 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 1898 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 1899 cast<llvm::IntegerType>(IRType)->getBitWidth(); 1900 1901 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1902 SourceOffset*8+64, getContext())) 1903 return IRType; 1904 } 1905 } 1906 1907 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1908 // If this is a struct, recurse into the field at the specified offset. 1909 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 1910 if (IROffset < SL->getSizeInBytes()) { 1911 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1912 IROffset -= SL->getElementOffset(FieldIdx); 1913 1914 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1915 SourceTy, SourceOffset); 1916 } 1917 } 1918 1919 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1920 llvm::Type *EltTy = ATy->getElementType(); 1921 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 1922 unsigned EltOffset = IROffset/EltSize*EltSize; 1923 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1924 SourceOffset); 1925 } 1926 1927 // Okay, we don't have any better idea of what to pass, so we pass this in an 1928 // integer register that isn't too big to fit the rest of the struct. 1929 unsigned TySizeInBytes = 1930 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1931 1932 assert(TySizeInBytes != SourceOffset && "Empty field?"); 1933 1934 // It is always safe to classify this as an integer type up to i64 that 1935 // isn't larger than the structure. 1936 return llvm::IntegerType::get(getVMContext(), 1937 std::min(TySizeInBytes-SourceOffset, 8U)*8); 1938} 1939 1940 1941/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 1942/// be used as elements of a two register pair to pass or return, return a 1943/// first class aggregate to represent them. For example, if the low part of 1944/// a by-value argument should be passed as i32* and the high part as float, 1945/// return {i32*, float}. 
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are only two sorts of types the ABI generation code can produce for
    // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    NULL);
    break;
  }

  llvm::Type *HighPart = 0;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo X86_64ABIInfo::classifyArgumentType(
  QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE)
  const
{
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
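  // For illustration (editorial example, not from the original source):
  // an __int128 argument classifies as Lo = Integer, Hi = Integer and
  // consumes two GPRs, while a long double classifies as Lo = X87,
  // Hi = X87Up and is passed in memory.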
2121 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2122 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2123 2124 neededInt = 0; 2125 neededSSE = 0; 2126 llvm::Type *ResType = 0; 2127 switch (Lo) { 2128 case NoClass: 2129 if (Hi == NoClass) 2130 return ABIArgInfo::getIgnore(); 2131 // If the low part is just padding, it takes no register, leave ResType 2132 // null. 2133 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2134 "Unknown missing lo part"); 2135 break; 2136 2137 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2138 // on the stack. 2139 case Memory: 2140 2141 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2142 // COMPLEX_X87, it is passed in memory. 2143 case X87: 2144 case ComplexX87: 2145 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2146 ++neededInt; 2147 return getIndirectResult(Ty, freeIntRegs); 2148 2149 case SSEUp: 2150 case X87Up: 2151 llvm_unreachable("Invalid classification for lo word."); 2152 2153 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2154 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2155 // and %r9 is used. 2156 case Integer: 2157 ++neededInt; 2158 2159 // Pick an 8-byte type based on the preferred type. 2160 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2161 2162 // If we have a sign or zero extended integer, make sure to return Extend 2163 // so that the parameter gets the right LLVM IR attributes. 2164 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2165 // Treat an enum type as its underlying type. 2166 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2167 Ty = EnumTy->getDecl()->getIntegerType(); 2168 2169 if (Ty->isIntegralOrEnumerationType() && 2170 Ty->isPromotableIntegerType()) 2171 return ABIArgInfo::getExtend(); 2172 } 2173 2174 break; 2175 2176 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2177 // available SSE register is used, the registers are taken in the 2178 // order from %xmm0 to %xmm7. 2179 case SSE: { 2180 llvm::Type *IRType = CGT.ConvertType(Ty); 2181 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2182 ++neededSSE; 2183 break; 2184 } 2185 } 2186 2187 llvm::Type *HighPart = 0; 2188 switch (Hi) { 2189 // Memory was handled previously, ComplexX87 and X87 should 2190 // never occur as hi classes, and X87Up must be preceded by X87, 2191 // which is passed in memory. 2192 case Memory: 2193 case X87: 2194 case ComplexX87: 2195 llvm_unreachable("Invalid classification for hi word."); 2196 2197 case NoClass: break; 2198 2199 case Integer: 2200 ++neededInt; 2201 // Pick an 8-byte type based on the preferred type. 2202 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2203 2204 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2205 return ABIArgInfo::getDirect(HighPart, 8); 2206 break; 2207 2208 // X87Up generally doesn't occur here (long double is passed in 2209 // memory), except in situations involving unions. 2210 case X87Up: 2211 case SSE: 2212 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2213 2214 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2215 return ABIArgInfo::getDirect(HighPart, 8); 2216 2217 ++neededSSE; 2218 break; 2219 2220 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2221 // eightbyte is passed in the upper half of the last used SSE 2222 // register. This only happens when 128-bit vectors are passed. 
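  // For example (illustrative, not from the original source): a __m128
  // argument classifies as Lo = SSE, Hi = SSEUp and is passed in a single
  // XMM register rather than as two separate eightbytes.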
2223 case SSEUp: 2224 assert(Lo == SSE && "Unexpected SSEUp classification"); 2225 ResType = GetByteVectorType(Ty); 2226 break; 2227 } 2228 2229 // If a high part was specified, merge it together with the low part. It is 2230 // known to pass in the high eightbyte of the result. We do this by forming a 2231 // first class struct aggregate with the high and low part: {low, high} 2232 if (HighPart) 2233 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2234 2235 return ABIArgInfo::getDirect(ResType); 2236} 2237 2238void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2239 2240 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2241 2242 // Keep track of the number of assigned registers. 2243 unsigned freeIntRegs = 6, freeSSERegs = 8; 2244 2245 // If the return value is indirect, then the hidden argument is consuming one 2246 // integer register. 2247 if (FI.getReturnInfo().isIndirect()) 2248 --freeIntRegs; 2249 2250 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2251 // get assigned (in left-to-right order) for passing as follows... 2252 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2253 it != ie; ++it) { 2254 unsigned neededInt, neededSSE; 2255 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2256 neededSSE); 2257 2258 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2259 // eightbyte of an argument, the whole argument is passed on the 2260 // stack. If registers have already been assigned for some 2261 // eightbytes of such an argument, the assignments get reverted. 2262 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2263 freeIntRegs -= neededInt; 2264 freeSSERegs -= neededSSE; 2265 } else { 2266 it->info = getIndirectResult(it->type, freeIntRegs); 2267 } 2268 } 2269} 2270 2271static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2272 QualType Ty, 2273 CodeGenFunction &CGF) { 2274 llvm::Value *overflow_arg_area_p = 2275 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2276 llvm::Value *overflow_arg_area = 2277 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2278 2279 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2280 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2281 // It isn't stated explicitly in the standard, but in practice we use 2282 // alignment greater than 16 where necessary. 2283 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2284 if (Align > 8) { 2285 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2286 llvm::Value *Offset = 2287 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2288 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2289 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2290 CGF.Int64Ty); 2291 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2292 overflow_arg_area = 2293 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2294 overflow_arg_area->getType(), 2295 "overflow_arg_area.align"); 2296 } 2297 2298 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2299 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2300 llvm::Value *Res = 2301 CGF.Builder.CreateBitCast(overflow_arg_area, 2302 llvm::PointerType::getUnqual(LTy)); 2303 2304 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2305 // l->overflow_arg_area + sizeof(type). 2306 // AMD64-ABI 3.5.7p5: Step 10. 
  // Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
    llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not, go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the ABI document; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save space.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
2383 // 2384 // FIXME: This really results in shameful code when we end up needing to 2385 // collect arguments from different places; often what should result in a 2386 // simple assembling of a structure from scattered addresses has many more 2387 // loads than necessary. Can we clean this up? 2388 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2389 llvm::Value *RegAddr = 2390 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2391 "reg_save_area"); 2392 if (neededInt && neededSSE) { 2393 // FIXME: Cleanup. 2394 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2395 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2396 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2397 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2398 llvm::Type *TyLo = ST->getElementType(0); 2399 llvm::Type *TyHi = ST->getElementType(1); 2400 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2401 "Unexpected ABI info for mixed regs"); 2402 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2403 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2404 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2405 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2406 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2407 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2408 llvm::Value *V = 2409 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2410 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2411 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2412 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2413 2414 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2415 llvm::PointerType::getUnqual(LTy)); 2416 } else if (neededInt) { 2417 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2418 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2419 llvm::PointerType::getUnqual(LTy)); 2420 } else if (neededSSE == 1) { 2421 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2422 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2423 llvm::PointerType::getUnqual(LTy)); 2424 } else { 2425 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2426 // SSE registers are spaced 16 bytes apart in the register save 2427 // area, we need to collect the two eightbytes together. 2428 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2429 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2430 llvm::Type *DoubleTy = CGF.DoubleTy; 2431 llvm::Type *DblPtrTy = 2432 llvm::PointerType::getUnqual(DoubleTy); 2433 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2434 DoubleTy, NULL); 2435 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2436 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2437 DblPtrTy)); 2438 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2439 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2440 DblPtrTy)); 2441 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2442 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2443 llvm::PointerType::getUnqual(LTy)); 2444 } 2445 2446 // AMD64-ABI 3.5.7p5: Step 5. Set: 2447 // l->gp_offset = l->gp_offset + num_gp * 8 2448 // l->fp_offset = l->fp_offset + num_fp * 16. 
2449 if (neededInt) { 2450 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2451 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2452 gp_offset_p); 2453 } 2454 if (neededSSE) { 2455 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2456 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2457 fp_offset_p); 2458 } 2459 CGF.EmitBranch(ContBlock); 2460 2461 // Emit code to load the value if it was passed in memory. 2462 2463 CGF.EmitBlock(InMemBlock); 2464 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2465 2466 // Return the appropriate result. 2467 2468 CGF.EmitBlock(ContBlock); 2469 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2470 "vaarg.addr"); 2471 ResAddr->addIncoming(RegAddr, InRegBlock); 2472 ResAddr->addIncoming(MemAddr, InMemBlock); 2473 return ResAddr; 2474} 2475 2476ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2477 2478 if (Ty->isVoidType()) 2479 return ABIArgInfo::getIgnore(); 2480 2481 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2482 Ty = EnumTy->getDecl()->getIntegerType(); 2483 2484 uint64_t Size = getContext().getTypeSize(Ty); 2485 2486 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2487 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2488 RT->getDecl()->hasFlexibleArrayMember()) 2489 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2490 2491 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2492 if (Size == 128 && 2493 getContext().getTargetInfo().getTriple().getOS() 2494 == llvm::Triple::MinGW32) 2495 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2496 Size)); 2497 2498 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2499 // not 1, 2, 4, or 8 bytes, must be passed by reference." 
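    //
    // For example (illustrative, not from the original source): a 3-byte
    // struct (24 bits, not a power of two) is passed by reference below,
    // while a 4-byte struct is passed directly as an i32.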
    if (Size <= 64 &&
        (Size & (Size - 1)) == 0)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));

    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  if (Ty->isPromotableIntegerType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect();
}

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  QualType RetTy = FI.getReturnType();
  FI.getReturnInfo() = classify(RetTy);

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classify(it->type);
}

llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                         CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

namespace {

class NaClX86_64ABIInfo : public ABIInfo {
 public:
  NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
 private:
  PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
  X86_64ABIInfo NInfo; // Used for everything else.
};

class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
};

}

void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    NInfo.computeInfo(FI);
}

llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                          CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
}


// PowerPC-32

namespace {
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.
2606 2607 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2608 2609 llvm::IntegerType *i8 = CGF.Int8Ty; 2610 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2611 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2612 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2613 2614 // 0-31: r0-31, the 4-byte general-purpose registers 2615 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2616 2617 // 32-63: fp0-31, the 8-byte floating-point registers 2618 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2619 2620 // 64-76 are various 4-byte special-purpose registers: 2621 // 64: mq 2622 // 65: lr 2623 // 66: ctr 2624 // 67: ap 2625 // 68-75 cr0-7 2626 // 76: xer 2627 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2628 2629 // 77-108: v0-31, the 16-byte vector registers 2630 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2631 2632 // 109: vrsave 2633 // 110: vscr 2634 // 111: spe_acc 2635 // 112: spefscr 2636 // 113: sfp 2637 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2638 2639 return false; 2640} 2641 2642// PowerPC-64 2643 2644namespace { 2645/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 2646class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 2647 2648public: 2649 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 2650 2651 // TODO: We can add more logic to computeInfo to improve performance. 2652 // Example: For aggregate arguments that fit in a register, we could 2653 // use getDirectInReg (as is done below for structs containing a single 2654 // floating-point value) to avoid pushing them to memory on function 2655 // entry. This would require changing the logic in PPCISelLowering 2656 // when lowering the parameters in the caller and args in the callee. 2657 virtual void computeInfo(CGFunctionInfo &FI) const { 2658 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2659 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2660 it != ie; ++it) { 2661 // We rely on the default argument classification for the most part. 2662 // One exception: An aggregate containing a single floating-point 2663 // item must be passed in a register if one is available. 2664 const Type *T = isSingleElementStruct(it->type, getContext()); 2665 if (T) { 2666 const BuiltinType *BT = T->getAs<BuiltinType>(); 2667 if (BT && BT->isFloatingPoint()) { 2668 QualType QT(T, 0); 2669 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 2670 continue; 2671 } 2672 } 2673 it->info = classifyArgumentType(it->type); 2674 } 2675 } 2676 2677 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 2678 QualType Ty, 2679 CodeGenFunction &CGF) const; 2680}; 2681 2682class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 2683public: 2684 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT) 2685 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {} 2686 2687 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2688 // This is recovered from gcc output. 2689 return 1; // r1 is the dedicated stack pointer 2690 } 2691 2692 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2693 llvm::Value *Address) const; 2694}; 2695 2696class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2697public: 2698 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2699 2700 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2701 // This is recovered from gcc output. 
2702 return 1; // r1 is the dedicated stack pointer 2703 } 2704 2705 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2706 llvm::Value *Address) const; 2707}; 2708 2709} 2710 2711// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 2712llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 2713 QualType Ty, 2714 CodeGenFunction &CGF) const { 2715 llvm::Type *BP = CGF.Int8PtrTy; 2716 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2717 2718 CGBuilderTy &Builder = CGF.Builder; 2719 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2720 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2721 2722 // Handle address alignment for type alignment > 64 bits. Although 2723 // long double normally requires 16-byte alignment, this is not the 2724 // case when it is passed as an argument; so handle that special case. 2725 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 2726 unsigned TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 2727 2728 if (TyAlign > 8 && (!BT || !BT->isFloatingPoint())) { 2729 assert((TyAlign & (TyAlign - 1)) == 0 && 2730 "Alignment is not power of 2!"); 2731 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2732 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(TyAlign - 1)); 2733 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(~(TyAlign - 1))); 2734 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2735 } 2736 2737 // Update the va_list pointer. 2738 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 2739 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 2740 llvm::Value *NextAddr = 2741 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 2742 "ap.next"); 2743 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2744 2745 // If the argument is smaller than 8 bytes, it is right-adjusted in 2746 // its doubleword slot. Adjust the pointer to pick it up from the 2747 // correct offset. 2748 if (SizeInBytes < 8) { 2749 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2750 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 2751 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2752 } 2753 2754 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2755 return Builder.CreateBitCast(Addr, PTy); 2756} 2757 2758static bool 2759PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2760 llvm::Value *Address) { 2761 // This is calculated from the LLVM and GCC tables and verified 2762 // against gcc output. AFAIK all ABIs use the same encoding. 
2763 2764 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2765 2766 llvm::IntegerType *i8 = CGF.Int8Ty; 2767 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2768 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2769 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2770 2771 // 0-31: r0-31, the 8-byte general-purpose registers 2772 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2773 2774 // 32-63: fp0-31, the 8-byte floating-point registers 2775 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2776 2777 // 64-76 are various 4-byte special-purpose registers: 2778 // 64: mq 2779 // 65: lr 2780 // 66: ctr 2781 // 67: ap 2782 // 68-75 cr0-7 2783 // 76: xer 2784 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2785 2786 // 77-108: v0-31, the 16-byte vector registers 2787 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2788 2789 // 109: vrsave 2790 // 110: vscr 2791 // 111: spe_acc 2792 // 112: spefscr 2793 // 113: sfp 2794 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2795 2796 return false; 2797} 2798 2799bool 2800PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 2801 CodeGen::CodeGenFunction &CGF, 2802 llvm::Value *Address) const { 2803 2804 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2805} 2806 2807bool 2808PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2809 llvm::Value *Address) const { 2810 2811 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2812} 2813 2814//===----------------------------------------------------------------------===// 2815// ARM ABI Implementation 2816//===----------------------------------------------------------------------===// 2817 2818namespace { 2819 2820class ARMABIInfo : public ABIInfo { 2821public: 2822 enum ABIKind { 2823 APCS = 0, 2824 AAPCS = 1, 2825 AAPCS_VFP 2826 }; 2827 2828private: 2829 ABIKind Kind; 2830 2831public: 2832 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2833 2834 bool isEABI() const { 2835 StringRef Env = 2836 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2837 return (Env == "gnueabi" || Env == "eabi" || 2838 Env == "android" || Env == "androideabi"); 2839 } 2840 2841private: 2842 ABIKind getABIKind() const { return Kind; } 2843 2844 ABIArgInfo classifyReturnType(QualType RetTy) const; 2845 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2846 bool isIllegalVectorType(QualType Ty) const; 2847 2848 virtual void computeInfo(CGFunctionInfo &FI) const; 2849 2850 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2851 CodeGenFunction &CGF) const; 2852}; 2853 2854class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2855public: 2856 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2857 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2858 2859 const ARMABIInfo &getABIInfo() const { 2860 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2861 } 2862 2863 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2864 return 13; 2865 } 2866 2867 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2868 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2869 } 2870 2871 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2872 llvm::Value *Address) const { 2873 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2874 2875 // 0-15 are the 16 integer registers. 
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }
};

}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  // The calling convention the ABI uses by default.
  llvm::CallingConv::ID DefaultCC;
  if (isEABI())
    DefaultCC = llvm::CallingConv::ARM_AAPCS;
  else
    DefaultCC = llvm::CallingConv::ARM_APCS;

  // If the user did not explicitly ask for a specific calling convention
  // (e.g. via the pcs attribute), set the effective calling convention when
  // it differs from the ABI default.
  switch (getABIKind()) {
  case APCS:
    if (DefaultCC != llvm::CallingConv::ARM_APCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
    break;
  case AAPCS:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
    break;
  case AAPCS_VFP:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
    break;
  }
}

/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
/// aggregate. If HAMembers is non-null, the number of base elements
/// contained in the type is returned through it; this is used for the
/// recursive calls that check aggregate component types.
static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                   ASTContext &Context,
                                   uint64_t *HAMembers = 0) {
  uint64_t Members = 0;
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
      return false;
    Members *= AT->getSize().getZExtValue();
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      const FieldDecl *FD = *i;
      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Homogeneous aggregates for AAPCS-VFP must have base types of float,
    // double, or 64-bit or 128-bit vectors.
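    //
    // For example (illustrative, not from the original source):
    //   struct HA  { float x, y, z, w; };   // homogeneous aggregate of 4 floats
    //   struct Not { float f; double d; };  // not an HA: mixed base types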
2961 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2962 if (BT->getKind() != BuiltinType::Float && 2963 BT->getKind() != BuiltinType::Double && 2964 BT->getKind() != BuiltinType::LongDouble) 2965 return false; 2966 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2967 unsigned VecSize = Context.getTypeSize(VT); 2968 if (VecSize != 64 && VecSize != 128) 2969 return false; 2970 } else { 2971 return false; 2972 } 2973 2974 // The base type must be the same for all members. Vector types of the 2975 // same total size are treated as being equivalent here. 2976 const Type *TyPtr = Ty.getTypePtr(); 2977 if (!Base) 2978 Base = TyPtr; 2979 if (Base != TyPtr && 2980 (!Base->isVectorType() || !TyPtr->isVectorType() || 2981 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 2982 return false; 2983 } 2984 2985 // Homogeneous Aggregates can have at most 4 members of the base type. 2986 if (HAMembers) 2987 *HAMembers = Members; 2988 2989 return (Members > 0 && Members <= 4); 2990} 2991 2992ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2993 // Handle illegal vector types here. 2994 if (isIllegalVectorType(Ty)) { 2995 uint64_t Size = getContext().getTypeSize(Ty); 2996 if (Size <= 32) { 2997 llvm::Type *ResType = 2998 llvm::Type::getInt32Ty(getVMContext()); 2999 return ABIArgInfo::getDirect(ResType); 3000 } 3001 if (Size == 64) { 3002 llvm::Type *ResType = llvm::VectorType::get( 3003 llvm::Type::getInt32Ty(getVMContext()), 2); 3004 return ABIArgInfo::getDirect(ResType); 3005 } 3006 if (Size == 128) { 3007 llvm::Type *ResType = llvm::VectorType::get( 3008 llvm::Type::getInt32Ty(getVMContext()), 4); 3009 return ABIArgInfo::getDirect(ResType); 3010 } 3011 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3012 } 3013 3014 if (!isAggregateTypeForABI(Ty)) { 3015 // Treat an enum type as its underlying type. 3016 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3017 Ty = EnumTy->getDecl()->getIntegerType(); 3018 3019 return (Ty->isPromotableIntegerType() ? 3020 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3021 } 3022 3023 // Ignore empty records. 3024 if (isEmptyRecord(getContext(), Ty, true)) 3025 return ABIArgInfo::getIgnore(); 3026 3027 // Structures with either a non-trivial destructor or a non-trivial 3028 // copy constructor are always indirect. 3029 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3030 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3031 3032 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 3033 // Homogeneous Aggregates need to be expanded. 3034 const Type *Base = 0; 3035 if (isHomogeneousAggregate(Ty, Base, getContext())) { 3036 assert(Base && "Base class should be set for homogeneous aggregate"); 3037 return ABIArgInfo::getExpand(); 3038 } 3039 } 3040 3041 // Support byval for ARM. 3042 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) || 3043 getContext().getTypeAlign(Ty) > 64) { 3044 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3045 } 3046 3047 // Otherwise, pass by coercing to a structure of the appropriate size. 3048 llvm::Type* ElemTy; 3049 unsigned SizeRegs; 3050 // FIXME: Try to match the types of the arguments more accurately where 3051 // we can. 
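  // For example (illustrative, not from the original source): a 12-byte
  // struct with 4-byte alignment is coerced to { [3 x i32] } below, while a
  // 16-byte struct with 8-byte alignment becomes { [2 x i64] }.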
3052 if (getContext().getTypeAlign(Ty) <= 32) { 3053 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 3054 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 3055 } else { 3056 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 3057 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 3058 } 3059 3060 llvm::Type *STy = 3061 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 3062 return ABIArgInfo::getDirect(STy); 3063} 3064 3065static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 3066 llvm::LLVMContext &VMContext) { 3067 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 3068 // is called integer-like if its size is less than or equal to one word, and 3069 // the offset of each of its addressable sub-fields is zero. 3070 3071 uint64_t Size = Context.getTypeSize(Ty); 3072 3073 // Check that the type fits in a word. 3074 if (Size > 32) 3075 return false; 3076 3077 // FIXME: Handle vector types! 3078 if (Ty->isVectorType()) 3079 return false; 3080 3081 // Float types are never treated as "integer like". 3082 if (Ty->isRealFloatingType()) 3083 return false; 3084 3085 // If this is a builtin or pointer type then it is ok. 3086 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 3087 return true; 3088 3089 // Small complex integer types are "integer like". 3090 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 3091 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 3092 3093 // Single element and zero sized arrays should be allowed, by the definition 3094 // above, but they are not. 3095 3096 // Otherwise, it must be a record type. 3097 const RecordType *RT = Ty->getAs<RecordType>(); 3098 if (!RT) return false; 3099 3100 // Ignore records with flexible arrays. 3101 const RecordDecl *RD = RT->getDecl(); 3102 if (RD->hasFlexibleArrayMember()) 3103 return false; 3104 3105 // Check that all sub-fields are at offset 0, and are themselves "integer 3106 // like". 3107 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3108 3109 bool HadField = false; 3110 unsigned idx = 0; 3111 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3112 i != e; ++i, ++idx) { 3113 const FieldDecl *FD = *i; 3114 3115 // Bit-fields are not addressable, we only need to verify they are "integer 3116 // like". We still have to disallow a subsequent non-bitfield, for example: 3117 // struct { int : 0; int x } 3118 // is non-integer like according to gcc. 3119 if (FD->isBitField()) { 3120 if (!RD->isUnion()) 3121 HadField = true; 3122 3123 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3124 return false; 3125 3126 continue; 3127 } 3128 3129 // Check if this field is at offset 0. 3130 if (Layout.getFieldOffset(idx) != 0) 3131 return false; 3132 3133 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3134 return false; 3135 3136 // Only allow at most one field in a structure. This doesn't match the 3137 // wording above, but follows gcc in situations with a field following an 3138 // empty structure. 3139 if (!RD->isUnion()) { 3140 if (HadField) 3141 return false; 3142 3143 HadField = true; 3144 } 3145 } 3146 3147 return true; 3148} 3149 3150ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 3151 if (RetTy->isVoidType()) 3152 return ABIArgInfo::getIgnore(); 3153 3154 // Large vector types should be returned via memory. 
3155   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
3156     return ABIArgInfo::getIndirect(0);
3157
3158   if (!isAggregateTypeForABI(RetTy)) {
3159     // Treat an enum type as its underlying type.
3160     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3161       RetTy = EnumTy->getDecl()->getIntegerType();
3162
3163     return (RetTy->isPromotableIntegerType() ?
3164             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3165   }
3166
3167   // Structures with either a non-trivial destructor or a non-trivial
3168   // copy constructor are always indirect.
3169   if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
3170     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3171
3172   // Are we following APCS?
3173   if (getABIKind() == APCS) {
3174     if (isEmptyRecord(getContext(), RetTy, false))
3175       return ABIArgInfo::getIgnore();
3176
3177     // Complex types are all returned as packed integers.
3178     //
3179     // FIXME: Consider using 2 x vector types if the back end handles them
3180     // correctly.
3181     if (RetTy->isAnyComplexType())
3182       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3183                                               getContext().getTypeSize(RetTy)));
3184
3185     // Integer-like structures are returned in r0.
3186     if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
3187       // Return in the smallest viable integer type.
3188       uint64_t Size = getContext().getTypeSize(RetTy);
3189       if (Size <= 8)
3190         return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3191       if (Size <= 16)
3192         return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3193       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3194     }
3195
3196     // Otherwise return in memory.
3197     return ABIArgInfo::getIndirect(0);
3198   }
3199
3200   // Otherwise this is an AAPCS variant.
3201
3202   if (isEmptyRecord(getContext(), RetTy, true))
3203     return ABIArgInfo::getIgnore();
3204
3205   // Check for homogeneous aggregates with AAPCS-VFP.
3206   if (getABIKind() == AAPCS_VFP) {
3207     const Type *Base = 0;
3208     if (isHomogeneousAggregate(RetTy, Base, getContext())) {
3209       assert(Base && "Base class should be set for homogeneous aggregate");
3210       // Homogeneous Aggregates are returned directly.
3211       return ABIArgInfo::getDirect();
3212     }
3213   }
3214
3215   // Aggregates <= 4 bytes are returned in r0; other aggregates
3216   // are returned indirectly.
3217   uint64_t Size = getContext().getTypeSize(RetTy);
3218   if (Size <= 32) {
3219     // Return in the smallest viable integer type.
3220     if (Size <= 8)
3221       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3222     if (Size <= 16)
3223       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3224     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3225   }
3226
3227   return ABIArgInfo::getIndirect(0);
3228 }
3229
3230 /// isIllegalVectorType - check whether Ty is an illegal vector type.
3231 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
3232   if (const VectorType *VT = Ty->getAs<VectorType>()) {
3233     // Check whether VT is legal.
3234     unsigned NumElements = VT->getNumElements();
3235     uint64_t Size = getContext().getTypeSize(VT);
3236     // NumElements should be a power of 2.
3237     if ((NumElements & (NumElements - 1)) != 0)
3238       return true;
3239     // A legal vector must be wider than 32 bits; smaller ones are illegal.
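    // Illustrative examples (added for clarity): a <2 x i8> vector (16 bits)
    // is illegal here, while <2 x i32> (64 bits) and <4 x i32> (128 bits) are
    // legal; a 3-element vector is rejected above by the power-of-2 check.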
3240     return Size <= 32;
3241   }
3242   return false;
3243 }
3244
3245 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3246                                    CodeGenFunction &CGF) const {
3247   llvm::Type *BP = CGF.Int8PtrTy;
3248   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3249
3250   CGBuilderTy &Builder = CGF.Builder;
3251   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3252   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3253
3254   uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
3255   uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
3256   bool IsIndirect = false;
3257
3258   // The ABI alignment for 64-bit or 128-bit vectors is 8 bytes for AAPCS and
3259   // 4 bytes for APCS. For AAPCS, the alignment is clamped to between 4 and 8.
3260   if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
3261       getABIKind() == ARMABIInfo::AAPCS)
3262     TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
3263   else
3264     TyAlign = 4;
3265   // Pass indirectly if the illegal vector is larger than 16 bytes.
3266   if (isIllegalVectorType(Ty) && Size > 16) {
3267     IsIndirect = true;
3268     Size = 4;
3269     TyAlign = 4;
3270   }
3271
3272   // Handle address alignment for ABI alignment > 4 bytes.
3273   if (TyAlign > 4) {
3274     assert((TyAlign & (TyAlign - 1)) == 0 &&
3275            "Alignment is not a power of 2!");
3276     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
3277     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
3278     AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
3279     Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
3280   }
3281
3282   uint64_t Offset =
3283     llvm::RoundUpToAlignment(Size, 4);
3284   llvm::Value *NextAddr =
3285     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3286                       "ap.next");
3287   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3288
3289   if (IsIndirect)
3290     Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
3291   else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
3292     // We can't directly cast ap.cur to a pointer to the vector type, since
3293     // ap.cur may not be correctly aligned for the vector type. We create an
3294     // aligned temporary and copy the contents of ap.cur into it. This is
3295     // necessary whenever the natural alignment of the type is greater than
3296     // the ABI alignment.
3297     llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
3298     CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
3299     llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
3300                                                     "var.align");
3301     llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
3302     llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
3303     Builder.CreateMemCpy(Dst, Src,
3304         llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
3305         TyAlign, false);
3306     Addr = AlignedTemp; // The contents are now in an aligned location.
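    // Illustrative example (added for clarity): a generic vector declared as
    //   typedef float float32x4 __attribute__((vector_size(16)));
    // may have a natural alignment of 16 bytes, but the va_list slot here is
    // at most 8-byte aligned, so its bytes are memcpy'd from ap.cur into the
    // aligned temporary above before being reinterpreted.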
3307   }
3308   llvm::Type *PTy =
3309     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3310   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3311
3312   return AddrTyped;
3313 }
3314
3315 namespace {
3316
3317 class NaClARMABIInfo : public ABIInfo {
3318  public:
3319   NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3320       : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
3321   virtual void computeInfo(CGFunctionInfo &FI) const;
3322   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3323                                  CodeGenFunction &CGF) const;
3324  private:
3325   PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
3326   ARMABIInfo NInfo; // Used for everything else.
3327 };
3328
3329 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
3330  public:
3331   NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3332       : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
3333 };
3334
3335 }
3336
3337 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
3338   if (FI.getASTCallingConvention() == CC_PnaclCall)
3339     PInfo.computeInfo(FI);
3340   else
3341     static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
3342 }
3343
3344 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3345                                        CodeGenFunction &CGF) const {
3346   // Always use the native convention; calling pnacl-style varargs functions
3347   // is unsupported.
3348   return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
3349 }
3350
3351 //===----------------------------------------------------------------------===//
3352 // NVPTX ABI Implementation
3353 //===----------------------------------------------------------------------===//
3354
3355 namespace {
3356
3357 class NVPTXABIInfo : public ABIInfo {
3358 public:
3359   NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3360
3361   ABIArgInfo classifyReturnType(QualType RetTy) const;
3362   ABIArgInfo classifyArgumentType(QualType Ty) const;
3363
3364   virtual void computeInfo(CGFunctionInfo &FI) const;
3365   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3366                                  CodeGenFunction &CGF) const;
3367 };
3368
3369 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
3370 public:
3371   NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
3372     : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
3373
3374   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3375                                    CodeGen::CodeGenModule &M) const;
3376 };
3377
3378 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
3379   if (RetTy->isVoidType())
3380     return ABIArgInfo::getIgnore();
3381   if (isAggregateTypeForABI(RetTy))
3382     return ABIArgInfo::getIndirect(0);
3383   return ABIArgInfo::getDirect();
3384 }
3385
3386 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
3387   if (isAggregateTypeForABI(Ty))
3388     return ABIArgInfo::getIndirect(0);
3389
3390   return ABIArgInfo::getDirect();
3391 }
3392
3393 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
3394   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3395   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3396        it != ie; ++it)
3397     it->info = classifyArgumentType(it->type);
3398
3399   // Always honor user-specified calling convention.
3400   if (FI.getCallingConvention() != llvm::CallingConv::C)
3401     return;
3402
3403   // Otherwise, fall back to the calling convention the ABI dictates by default.
3404   // We're still using the PTX_Kernel/PTX_Device calling conventions here,
3405   // but we should switch to NVVM metadata later on.
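  // Illustrative summary (added for clarity): CUDA and OpenCL inputs default
  // every function to PTX_Device; plain C/C++ defaults to PTX_Kernel unless
  // the target triple's environment component is "device", e.g. a
  // (hypothetical) triple like nvptx64-unknown-unknown-device.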
3406   llvm::CallingConv::ID DefaultCC;
3407   const LangOptions &LangOpts = getContext().getLangOpts();
3408   if (LangOpts.OpenCL || LangOpts.CUDA) {
3409     // If we are in OpenCL or CUDA mode, then default to device functions.
3410     DefaultCC = llvm::CallingConv::PTX_Device;
3411   } else {
3412     // If we are in standard C/C++ mode, use the triple to decide on the default.
3413     StringRef Env =
3414       getContext().getTargetInfo().getTriple().getEnvironmentName();
3415     if (Env == "device")
3416       DefaultCC = llvm::CallingConv::PTX_Device;
3417     else
3418       DefaultCC = llvm::CallingConv::PTX_Kernel;
3419   }
3420   FI.setEffectiveCallingConvention(DefaultCC);
3421 }
3422
3423
3424 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3425                                      CodeGenFunction &CGF) const {
3426   llvm_unreachable("NVPTX does not support varargs");
3427 }
3428
3429 void NVPTXTargetCodeGenInfo::
3430 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3431                     CodeGen::CodeGenModule &M) const {
3432   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3433   if (!FD) return;
3434
3435   llvm::Function *F = cast<llvm::Function>(GV);
3436
3437   // Perform special handling in OpenCL mode.
3438   if (M.getLangOpts().OpenCL) {
3439     // Use OpenCL function attributes to set proper calling conventions.
3440     // By default, all functions are device functions.
3441     if (FD->hasAttr<OpenCLKernelAttr>()) {
3442       // OpenCL __kernel functions get a kernel calling convention,
3443       F->setCallingConv(llvm::CallingConv::PTX_Kernel);
3444       // and kernel functions are not subject to inlining.
3445       F->addFnAttr(llvm::Attributes::NoInline);
3446     }
3447   }
3448
3449   // Perform special handling in CUDA mode.
3450   if (M.getLangOpts().CUDA) {
3451     // CUDA __global__ functions get a kernel calling convention. Since
3452     // __global__ functions cannot be called from the device, we do not
3453     // need to set the noinline attribute.
3454     if (FD->hasAttr<CUDAGlobalAttr>())
3455       F->setCallingConv(llvm::CallingConv::PTX_Kernel);
3456   }
3457 }
3458
3459 }
3460
3461 //===----------------------------------------------------------------------===//
3462 // MBlaze ABI Implementation
3463 //===----------------------------------------------------------------------===//
3464
3465 namespace {
3466
3467 class MBlazeABIInfo : public ABIInfo {
3468 public:
3469   MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3470
3471   bool isPromotableIntegerType(QualType Ty) const;
3472
3473   ABIArgInfo classifyReturnType(QualType RetTy) const;
3474   ABIArgInfo classifyArgumentType(QualType Ty) const;
3475
3476   virtual void computeInfo(CGFunctionInfo &FI) const {
3477     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3478     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3479          it != ie; ++it)
3480       it->info = classifyArgumentType(it->type);
3481   }
3482
3483   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3484                                  CodeGenFunction &CGF) const;
3485 };
3486
3487 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
3488 public:
3489   MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
3490     : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
3491   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3492                            CodeGen::CodeGenModule &M) const;
3493 };
3494
3495 }
3496
3497 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
3498   // MBlaze ABI requires all 8- and 16-bit quantities to be extended.
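  // Illustrative example (added for clarity): an 'unsigned short' argument
  // or return value is widened to a full 32-bit register (zero- or
  // sign-extended as appropriate), while 'int' and wider types pass directly.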
3499 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3500 switch (BT->getKind()) { 3501 case BuiltinType::Bool: 3502 case BuiltinType::Char_S: 3503 case BuiltinType::Char_U: 3504 case BuiltinType::SChar: 3505 case BuiltinType::UChar: 3506 case BuiltinType::Short: 3507 case BuiltinType::UShort: 3508 return true; 3509 default: 3510 return false; 3511 } 3512 return false; 3513} 3514 3515llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3516 CodeGenFunction &CGF) const { 3517 // FIXME: Implement 3518 return 0; 3519} 3520 3521 3522ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 3523 if (RetTy->isVoidType()) 3524 return ABIArgInfo::getIgnore(); 3525 if (isAggregateTypeForABI(RetTy)) 3526 return ABIArgInfo::getIndirect(0); 3527 3528 return (isPromotableIntegerType(RetTy) ? 3529 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3530} 3531 3532ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 3533 if (isAggregateTypeForABI(Ty)) 3534 return ABIArgInfo::getIndirect(0); 3535 3536 return (isPromotableIntegerType(Ty) ? 3537 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3538} 3539 3540void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3541 llvm::GlobalValue *GV, 3542 CodeGen::CodeGenModule &M) 3543 const { 3544 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3545 if (!FD) return; 3546 3547 llvm::CallingConv::ID CC = llvm::CallingConv::C; 3548 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 3549 CC = llvm::CallingConv::MBLAZE_INTR; 3550 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 3551 CC = llvm::CallingConv::MBLAZE_SVOL; 3552 3553 if (CC != llvm::CallingConv::C) { 3554 // Handle 'interrupt_handler' attribute: 3555 llvm::Function *F = cast<llvm::Function>(GV); 3556 3557 // Step 1: Set ISR calling convention. 3558 F->setCallingConv(CC); 3559 3560 // Step 2: Add attributes goodness. 3561 F->addFnAttr(llvm::Attributes::NoInline); 3562 } 3563 3564 // Step 3: Emit _interrupt_handler alias. 3565 if (CC == llvm::CallingConv::MBLAZE_INTR) 3566 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3567 "_interrupt_handler", GV, &M.getModule()); 3568} 3569 3570 3571//===----------------------------------------------------------------------===// 3572// MSP430 ABI Implementation 3573//===----------------------------------------------------------------------===// 3574 3575namespace { 3576 3577class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3578public: 3579 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3580 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3581 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3582 CodeGen::CodeGenModule &M) const; 3583}; 3584 3585} 3586 3587void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3588 llvm::GlobalValue *GV, 3589 CodeGen::CodeGenModule &M) const { 3590 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3591 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3592 // Handle 'interrupt' attribute: 3593 llvm::Function *F = cast<llvm::Function>(GV); 3594 3595 // Step 1: Set ISR calling convention. 3596 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3597 3598 // Step 2: Add attributes goodness. 3599 F->addFnAttr(llvm::Attributes::NoInline); 3600 3601 // Step 3: Emit ISR vector alias. 
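      // Illustrative example (added for clarity): for a handler declared as
      //   void __attribute__((interrupt(2))) isr(void);
      // Num below becomes 0xffe2 and the alias is named "vector_ffe2",
      // matching the MSP430 interrupt vector table at 0xffe0-0xfffe.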
3602       unsigned Num = attr->getNumber() + 0xffe0;
3603       new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
3604                             "vector_" + Twine::utohexstr(Num),
3605                             GV, &M.getModule());
3606     }
3607   }
3608 }
3609
3610 //===----------------------------------------------------------------------===//
3611 // MIPS ABI Implementation. This works for both little-endian and
3612 // big-endian variants.
3613 //===----------------------------------------------------------------------===//
3614
3615 namespace {
3616 class MipsABIInfo : public ABIInfo {
3617   bool IsO32;
3618   unsigned MinABIStackAlignInBytes, StackAlignInBytes;
3619   void CoerceToIntArgs(uint64_t TySize,
3620                        SmallVector<llvm::Type*, 8> &ArgList) const;
3621   llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
3622   llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
3623   llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
3624 public:
3625   MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
3626     ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
3627     StackAlignInBytes(IsO32 ? 8 : 16) {}
3628
3629   ABIArgInfo classifyReturnType(QualType RetTy) const;
3630   ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
3631   virtual void computeInfo(CGFunctionInfo &FI) const;
3632   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3633                                  CodeGenFunction &CGF) const;
3634 };
3635
3636 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
3637   unsigned SizeOfUnwindException;
3638 public:
3639   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
3640     : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
3641       SizeOfUnwindException(IsO32 ? 24 : 32) {}
3642
3643   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
3644     return 29;
3645   }
3646
3647   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3648                                llvm::Value *Address) const;
3649
3650   unsigned getSizeOfUnwindException() const {
3651     return SizeOfUnwindException;
3652   }
3653 };
3654 }
3655
3656 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
3657                                   SmallVector<llvm::Type*, 8> &ArgList) const {
3658   llvm::IntegerType *IntTy =
3659     llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
3660
3661   // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy.
3662   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
3663     ArgList.push_back(IntTy);
3664
3665   // If necessary, add one more integer type to ArgList.
3666   unsigned R = TySize % (MinABIStackAlignInBytes * 8);
3667
3668   if (R)
3669     ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
3670 }
3671
3672 // In N32/64, an aligned double-precision floating-point field is passed in
3673 // a register.
3674 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
3675   SmallVector<llvm::Type*, 8> ArgList, IntArgList;
3676
3677   if (IsO32) {
3678     CoerceToIntArgs(TySize, ArgList);
3679     return llvm::StructType::get(getVMContext(), ArgList);
3680   }
3681
3682   if (Ty->isComplexType())
3683     return CGT.ConvertType(Ty);
3684
3685   const RecordType *RT = Ty->getAs<RecordType>();
3686
3687   // Unions/vectors are passed in integer registers.
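  // Illustrative example (added for clarity): under N32/N64, for
  //   struct S { int i; double d; };
  // the double lies at a 64-bit-aligned offset, so the loop below yields
  // { i64, double } and d travels in a floating-point register; under O32
  // the same struct is simply coerced to integer words above.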
3688 if (!RT || !RT->isStructureOrClassType()) { 3689 CoerceToIntArgs(TySize, ArgList); 3690 return llvm::StructType::get(getVMContext(), ArgList); 3691 } 3692 3693 const RecordDecl *RD = RT->getDecl(); 3694 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3695 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 3696 3697 uint64_t LastOffset = 0; 3698 unsigned idx = 0; 3699 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 3700 3701 // Iterate over fields in the struct/class and check if there are any aligned 3702 // double fields. 3703 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3704 i != e; ++i, ++idx) { 3705 const QualType Ty = i->getType(); 3706 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3707 3708 if (!BT || BT->getKind() != BuiltinType::Double) 3709 continue; 3710 3711 uint64_t Offset = Layout.getFieldOffset(idx); 3712 if (Offset % 64) // Ignore doubles that are not aligned. 3713 continue; 3714 3715 // Add ((Offset - LastOffset) / 64) args of type i64. 3716 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 3717 ArgList.push_back(I64); 3718 3719 // Add double type. 3720 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3721 LastOffset = Offset + 64; 3722 } 3723 3724 CoerceToIntArgs(TySize - LastOffset, IntArgList); 3725 ArgList.append(IntArgList.begin(), IntArgList.end()); 3726 3727 return llvm::StructType::get(getVMContext(), ArgList); 3728} 3729 3730llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const { 3731 assert((Offset % MinABIStackAlignInBytes) == 0); 3732 3733 if ((Align - 1) & Offset) 3734 return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3735 3736 return 0; 3737} 3738 3739ABIArgInfo 3740MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 3741 uint64_t OrigOffset = Offset; 3742 uint64_t TySize = getContext().getTypeSize(Ty); 3743 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 3744 3745 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 3746 (uint64_t)StackAlignInBytes); 3747 Offset = llvm::RoundUpToAlignment(Offset, Align); 3748 Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 3749 3750 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 3751 // Ignore empty aggregates. 3752 if (TySize == 0) 3753 return ABIArgInfo::getIgnore(); 3754 3755 // Records with non trivial destructors/constructors should not be passed 3756 // by value. 3757 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) { 3758 Offset = OrigOffset + MinABIStackAlignInBytes; 3759 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3760 } 3761 3762 // If we have reached here, aggregates are passed directly by coercing to 3763 // another structure type. Padding is inserted if the offset of the 3764 // aggregate is unaligned. 3765 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 3766 getPaddingType(Align, OrigOffset)); 3767 } 3768 3769 // Treat an enum type as its underlying type. 
3770 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3771 Ty = EnumTy->getDecl()->getIntegerType(); 3772 3773 if (Ty->isPromotableIntegerType()) 3774 return ABIArgInfo::getExtend(); 3775 3776 return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset)); 3777} 3778 3779llvm::Type* 3780MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 3781 const RecordType *RT = RetTy->getAs<RecordType>(); 3782 SmallVector<llvm::Type*, 8> RTList; 3783 3784 if (RT && RT->isStructureOrClassType()) { 3785 const RecordDecl *RD = RT->getDecl(); 3786 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3787 unsigned FieldCnt = Layout.getFieldCount(); 3788 3789 // N32/64 returns struct/classes in floating point registers if the 3790 // following conditions are met: 3791 // 1. The size of the struct/class is no larger than 128-bit. 3792 // 2. The struct/class has one or two fields all of which are floating 3793 // point types. 3794 // 3. The offset of the first field is zero (this follows what gcc does). 3795 // 3796 // Any other composite results are returned in integer registers. 3797 // 3798 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3799 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3800 for (; b != e; ++b) { 3801 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 3802 3803 if (!BT || !BT->isFloatingPoint()) 3804 break; 3805 3806 RTList.push_back(CGT.ConvertType(b->getType())); 3807 } 3808 3809 if (b == e) 3810 return llvm::StructType::get(getVMContext(), RTList, 3811 RD->hasAttr<PackedAttr>()); 3812 3813 RTList.clear(); 3814 } 3815 } 3816 3817 CoerceToIntArgs(Size, RTList); 3818 return llvm::StructType::get(getVMContext(), RTList); 3819} 3820 3821ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3822 uint64_t Size = getContext().getTypeSize(RetTy); 3823 3824 if (RetTy->isVoidType() || Size == 0) 3825 return ABIArgInfo::getIgnore(); 3826 3827 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 3828 if (Size <= 128) { 3829 if (RetTy->isAnyComplexType()) 3830 return ABIArgInfo::getDirect(); 3831 3832 // O32 returns integer vectors in registers. 3833 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 3834 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3835 3836 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3837 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3838 } 3839 3840 return ABIArgInfo::getIndirect(0); 3841 } 3842 3843 // Treat an enum type as its underlying type. 3844 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3845 RetTy = EnumTy->getDecl()->getIntegerType(); 3846 3847 return (RetTy->isPromotableIntegerType() ? 3848 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3849} 3850 3851void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3852 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3853 RetInfo = classifyReturnType(FI.getReturnType()); 3854 3855 // Check if a pointer to an aggregate is passed as a hidden argument. 3856 uint64_t Offset = RetInfo.isIndirect() ? 
MinABIStackAlignInBytes : 0;
3857
3858   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3859        it != ie; ++it)
3860     it->info = classifyArgumentType(it->type, Offset);
3861 }
3862
3863 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3864                                     CodeGenFunction &CGF) const {
3865   llvm::Type *BP = CGF.Int8PtrTy;
3866   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3867
3868   CGBuilderTy &Builder = CGF.Builder;
3869   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3870   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3871   int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
3872   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3873   llvm::Value *AddrTyped;
3874   unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
3875   llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
3876
3877   if (TypeAlign > MinABIStackAlignInBytes) {
3878     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
3879     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
3880     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
3881     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
3882     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
3883     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
3884   }
3885   else
3886     AddrTyped = Builder.CreateBitCast(Addr, PTy);
3887
3888   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
3889   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
3890   uint64_t Offset =
3891     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
3892   llvm::Value *NextAddr =
3893     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
3894                       "ap.next");
3895   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3896
3897   return AddrTyped;
3898 }
3899
3900 bool
3901 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3902                                                llvm::Value *Address) const {
3903   // This information comes from gcc's implementation, which seems to be
3904   // as canonical as it gets.
3905
3906   // Everything on MIPS is 4 bytes. Double-precision FP registers
3907   // are aliased to pairs of single-precision FP registers.
3908   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3909
3910   // 0-31 are the general purpose registers, $0 - $31.
3911   // 32-63 are the floating-point registers, $f0 - $f31.
3912   // 64 and 65 are the multiply/divide registers, $hi and $lo.
3913   // 66 is the (notional, I think) register for signal-handler return.
3914   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
3915
3916   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
3917   // They are one bit wide and ignored here.
3918
3919   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
3920   // (coprocessor 1 is the FP unit)
3921   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
3922   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
3923   // 176-181 are the DSP accumulator registers.
3924   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
3925   return false;
3926 }
3927
3928 //===----------------------------------------------------------------------===//
3929 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
3930 // Currently subclassed only to implement custom OpenCL C function attribute
3931 // handling.
3932//===----------------------------------------------------------------------===// 3933 3934namespace { 3935 3936class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3937public: 3938 TCETargetCodeGenInfo(CodeGenTypes &CGT) 3939 : DefaultTargetCodeGenInfo(CGT) {} 3940 3941 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3942 CodeGen::CodeGenModule &M) const; 3943}; 3944 3945void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3946 llvm::GlobalValue *GV, 3947 CodeGen::CodeGenModule &M) const { 3948 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3949 if (!FD) return; 3950 3951 llvm::Function *F = cast<llvm::Function>(GV); 3952 3953 if (M.getLangOpts().OpenCL) { 3954 if (FD->hasAttr<OpenCLKernelAttr>()) { 3955 // OpenCL C Kernel functions are not subject to inlining 3956 F->addFnAttr(llvm::Attributes::NoInline); 3957 3958 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) { 3959 3960 // Convert the reqd_work_group_size() attributes to metadata. 3961 llvm::LLVMContext &Context = F->getContext(); 3962 llvm::NamedMDNode *OpenCLMetadata = 3963 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 3964 3965 SmallVector<llvm::Value*, 5> Operands; 3966 Operands.push_back(F); 3967 3968 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3969 llvm::APInt(32, 3970 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim()))); 3971 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3972 llvm::APInt(32, 3973 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim()))); 3974 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3975 llvm::APInt(32, 3976 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim()))); 3977 3978 // Add a boolean constant operand for "required" (true) or "hint" (false) 3979 // for implementing the work_group_size_hint attr later. Currently 3980 // always true as the hint is not yet implemented. 3981 Operands.push_back(llvm::ConstantInt::getTrue(Context)); 3982 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 3983 } 3984 } 3985 } 3986} 3987 3988} 3989 3990//===----------------------------------------------------------------------===// 3991// Hexagon ABI Implementation 3992//===----------------------------------------------------------------------===// 3993 3994namespace { 3995 3996class HexagonABIInfo : public ABIInfo { 3997 3998 3999public: 4000 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4001 4002private: 4003 4004 ABIArgInfo classifyReturnType(QualType RetTy) const; 4005 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4006 4007 virtual void computeInfo(CGFunctionInfo &FI) const; 4008 4009 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4010 CodeGenFunction &CGF) const; 4011}; 4012 4013class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 4014public: 4015 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 4016 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 4017 4018 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 4019 return 29; 4020 } 4021}; 4022 4023} 4024 4025void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 4026 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4027 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4028 it != ie; ++it) 4029 it->info = classifyArgumentType(it->type); 4030} 4031 4032ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 4033 if (!isAggregateTypeForABI(Ty)) { 4034 // Treat an enum type as its underlying type. 
4035     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4036       Ty = EnumTy->getDecl()->getIntegerType();
4037
4038     return (Ty->isPromotableIntegerType() ?
4039             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4040   }
4041
4042   // Ignore empty records.
4043   if (isEmptyRecord(getContext(), Ty, true))
4044     return ABIArgInfo::getIgnore();
4045
4046   // Structures with either a non-trivial destructor or a non-trivial
4047   // copy constructor are always indirect.
4048   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
4049     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
4050
4051   uint64_t Size = getContext().getTypeSize(Ty);
4052   if (Size > 64)
4053     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
4054   // Pass in the smallest viable integer type.
4055   else if (Size > 32)
4056     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
4057   else if (Size > 16)
4058     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4059   else if (Size > 8)
4060     return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
4061   else
4062     return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4063 }
4064
4065 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
4066   if (RetTy->isVoidType())
4067     return ABIArgInfo::getIgnore();
4068
4069   // Large vector types should be returned via memory.
4070   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
4071     return ABIArgInfo::getIndirect(0);
4072
4073   if (!isAggregateTypeForABI(RetTy)) {
4074     // Treat an enum type as its underlying type.
4075     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4076       RetTy = EnumTy->getDecl()->getIntegerType();
4077
4078     return (RetTy->isPromotableIntegerType() ?
4079             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4080   }
4081
4082   // Structures with either a non-trivial destructor or a non-trivial
4083   // copy constructor are always indirect.
4084   if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
4085     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
4086
4087   if (isEmptyRecord(getContext(), RetTy, true))
4088     return ABIArgInfo::getIgnore();
4089
4090   // Aggregates <= 8 bytes are returned in registers (r0, or the r1:r0 pair
4091   // for 64-bit values); other aggregates are returned indirectly.
4092   uint64_t Size = getContext().getTypeSize(RetTy);
4093   if (Size <= 64) {
4094     // Return in the smallest viable integer type.
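    // Illustrative example (added for clarity):
    //   struct { char a; short b; };   // 32 bits -> returned as i32
    //   struct { int a, b; };          // 64 bits -> returned as i64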
4095 if (Size <= 8) 4096 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4097 if (Size <= 16) 4098 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4099 if (Size <= 32) 4100 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4101 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4102 } 4103 4104 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4105} 4106 4107llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4108 CodeGenFunction &CGF) const { 4109 // FIXME: Need to handle alignment 4110 llvm::Type *BPP = CGF.Int8PtrPtrTy; 4111 4112 CGBuilderTy &Builder = CGF.Builder; 4113 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 4114 "ap"); 4115 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 4116 llvm::Type *PTy = 4117 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4118 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 4119 4120 uint64_t Offset = 4121 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 4122 llvm::Value *NextAddr = 4123 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 4124 "ap.next"); 4125 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 4126 4127 return AddrTyped; 4128} 4129 4130 4131const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 4132 if (TheTargetCodeGenInfo) 4133 return *TheTargetCodeGenInfo; 4134 4135 const llvm::Triple &Triple = getContext().getTargetInfo().getTriple(); 4136 switch (Triple.getArch()) { 4137 default: 4138 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 4139 4140 case llvm::Triple::le32: 4141 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 4142 case llvm::Triple::mips: 4143 case llvm::Triple::mipsel: 4144 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 4145 4146 case llvm::Triple::mips64: 4147 case llvm::Triple::mips64el: 4148 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 4149 4150 case llvm::Triple::arm: 4151 case llvm::Triple::thumb: 4152 { 4153 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 4154 4155 if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0) 4156 Kind = ARMABIInfo::APCS; 4157 else if (CodeGenOpts.FloatABI == "hard") 4158 Kind = ARMABIInfo::AAPCS_VFP; 4159 4160 switch (Triple.getOS()) { 4161 case llvm::Triple::NativeClient: 4162 return *(TheTargetCodeGenInfo = 4163 new NaClARMTargetCodeGenInfo(Types, Kind)); 4164 default: 4165 return *(TheTargetCodeGenInfo = 4166 new ARMTargetCodeGenInfo(Types, Kind)); 4167 } 4168 } 4169 4170 case llvm::Triple::ppc: 4171 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 4172 case llvm::Triple::ppc64: 4173 if (Triple.isOSBinFormatELF()) 4174 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types)); 4175 else 4176 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 4177 4178 case llvm::Triple::nvptx: 4179 case llvm::Triple::nvptx64: 4180 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 4181 4182 case llvm::Triple::mblaze: 4183 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 4184 4185 case llvm::Triple::msp430: 4186 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 4187 4188 case llvm::Triple::tce: 4189 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 4190 4191 case llvm::Triple::x86: { 4192 bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0; 4193 
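  // Illustrative note (added for clarity; flag meanings assumed from the
  // X86_32TargetCodeGenInfo constructor elsewhere in this file): Darwin
  // targets enable the Darwin vector ABI and small-struct-in-registers
  // behavior below, while the Win32 case additionally enables its own
  // struct-return convention.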
4194 if (Triple.isOSDarwin()) 4195 return *(TheTargetCodeGenInfo = 4196 new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false, 4197 CodeGenOpts.NumRegisterParameters)); 4198 4199 switch (Triple.getOS()) { 4200 case llvm::Triple::Cygwin: 4201 case llvm::Triple::MinGW32: 4202 case llvm::Triple::AuroraUX: 4203 case llvm::Triple::DragonFly: 4204 case llvm::Triple::FreeBSD: 4205 case llvm::Triple::OpenBSD: 4206 case llvm::Triple::Bitrig: 4207 return *(TheTargetCodeGenInfo = 4208 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, 4209 false, 4210 CodeGenOpts.NumRegisterParameters)); 4211 4212 case llvm::Triple::Win32: 4213 return *(TheTargetCodeGenInfo = 4214 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true, 4215 CodeGenOpts.NumRegisterParameters)); 4216 4217 default: 4218 return *(TheTargetCodeGenInfo = 4219 new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX, 4220 false, 4221 CodeGenOpts.NumRegisterParameters)); 4222 } 4223 } 4224 4225 case llvm::Triple::x86_64: { 4226 bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0; 4227 4228 switch (Triple.getOS()) { 4229 case llvm::Triple::Win32: 4230 case llvm::Triple::MinGW32: 4231 case llvm::Triple::Cygwin: 4232 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 4233 case llvm::Triple::NativeClient: 4234 return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX)); 4235 default: 4236 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, 4237 HasAVX)); 4238 } 4239 } 4240 case llvm::Triple::hexagon: 4241 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 4242 } 4243} 4244