TargetInfo.cpp revision 60e25804d14a52c173548f0f6c66d3d831cb901c
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::TargetData &ABIInfo::getTargetData() const {
  return CGT.getTargetData();
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64   FreeBSD, Linux, Darwin
  //   x86-32   FreeBSD, Linux, Darwin
  //   PowerPC  Linux, Darwin
  //   ARM      Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                      const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}
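
// Illustrative example (not part of the original source): given the C
// declarations
//   struct Empty { int : 3; };            // only an unnamed bit-field
//   struct Wrap  { struct Empty e[4]; };  // array of empty records
// isEmptyRecord(Wrap) is true: the unnamed bit-field is an empty field, and
// with AllowArrays the constant array is stripped down to its element type.
// A C++ class with the same members would not count as empty (see
// isEmptyField above).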

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
//
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
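
// Illustrative example (not part of the original source):
//   struct P2 { int x; int y; };   // 32 + 32 == 64 bits, no padding
// can be expanded into two separate i32 arguments, while
//   struct PC { char c; int i; }; // 8-bit field, plus padding before 'i'
// cannot: 'char' is not a 32/64-bit basic type, and the field sizes would
// not add up to the struct size anyway.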

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}
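
// Illustrative example (not part of the original source): <4 x i16> and
// <8 x i8> are 64-bit vectors with integer elements narrower than 64 bits,
// so UseX86_MMXType returns true and they are lowered as x86_mmx. <1 x i64>
// fails the scalar-size check and <2 x float> is not an integer vector, so
// both keep their ordinary IR types. The same x86_mmx lowering is applied
// above to vectors bound to the "y" inline-assembly constraint.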

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsMMXDisabled;
  bool IsWin32FloatStructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentTypeWithReg(QualType RetTy,
                                         unsigned &FreeRegs) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsMMXDisabled(m), IsWin32FloatStructABI(w),
      DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool m, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are returned in a register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}
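
// Illustrative example (not part of the original source): on Darwin,
//   struct R1 { short a, b; };   // 32 bits, both fields register-sized
// is returned in a register, while
//   struct R2 { char a, b, c; }; // 24 bits, not a register size
// is returned in memory, because 24 is not one of the sizes accepted by
// isRegisterSize above.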

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                           unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first. (If any base contains
  // an SSE vector, so does the derived record.)
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
  if (!ByVal)
    return ABIArgInfo::getIndirect(0, false);

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}
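
// Illustrative example (not part of the original source): a 16-byte aligned
// struct { __m128 v; } passed byval gets StackAlign == 16 under the Darwin
// vector ABI, so it is passed getIndirect(16). On other x86-32 targets the
// stack slot is only MinABIStackAlignInBytes (4) bytes aligned, which is
// below the type's alignment, so the argument is marked Realign and
// re-aligned in the callee.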

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

ABIArgInfo
X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
                                           unsigned &FreeRegs) const {
  // Common case first.
  if (FreeRegs == 0)
    return classifyArgumentType(Ty);

  Class C = classify(Ty);
  if (C == Float)
    return classifyArgumentType(Ty);

  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs == 0)
    return classifyArgumentType(Ty);

  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return classifyArgumentType(Ty);
  }
  assert(SizeInRegs >= 1 && SizeInRegs <= 3);
  FreeRegs -= SizeInRegs;

  // If it is a simple scalar, keep the type so that we produce a cleaner IR.
  ABIArgInfo Foo = classifyArgumentType(Ty);
  if (Foo.isDirect() && !Foo.getDirectOffset() && !Foo.getPaddingType())
    return ABIArgInfo::getDirectInReg(Foo.getCoerceToType());
  if (Foo.isExtend())
    return ABIArgInfo::getExtendInReg(Foo.getCoerceToType());

  llvm::LLVMContext &LLVMContext = getVMContext();
  llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
  SmallVector<llvm::Type*, 3> Elements;
  for (unsigned I = 0; I < SizeInRegs; ++I)
    Elements.push_back(Int32);
  llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
  return ABIArgInfo::getDirectInReg(Result);
}
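
// Illustrative example (not part of the original source): with
// __attribute__((regparm(3))), an argument of type struct { int a, b; }
// needs SizeInRegs == 2. While at least two registers remain free it is
// passed DirectInReg as the IR struct {i32, i32}; once fewer than two
// remain, FreeRegs is zeroed and the argument falls back to the ordinary
// stack classification. Arguments classified Float never take this path.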

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, /*ByVal=*/false);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by
    // passing them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    llvm::Type *IRType = CGT.ConvertType(Ty);
    if (UseX86_MMXType(IRType)) {
      if (IsMMXDisabled)
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            64));
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
    DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute whether the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
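
// Illustrative example (not part of the original source): on x86-32 the
// va_list is a simple pointer into the argument area, so va_arg(ap, int)
// lowers to: load the current pointer ("ap.cur"), bitcast it to i32*, and
// store back the pointer advanced by RoundUpToAlignment(4, 4) == 4 bytes
// ("ap.next"). Only types whose stack alignment exceeds 4 bytes need the
// pointer-rounding block above.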

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

  bool HasAVX;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx) {}

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
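
// Illustrative example (not part of the original source): classifying
//   struct S { double d; long l; };
// gives Lo == SSE (the double fills the first eightbyte) and Hi == Integer
// (the long fills the second), so S is passed in one XMM register and one
// GPR. For union U { long l; double d; }, both members start at offset 0
// and merge(Integer, SSE) yields Integer, so U travels in a single GPR.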

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType())
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256 bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to
      // class SSEUP. The original Lo and Hi design considers that types
      // can't be greater than 128 bits, so a 64-bit split in Hi and Lo
      // makes sense. This design isn't correct for 256 bits, but since
      // there are no cases where the upper parts would need to be
      // inspected, avoid adding complexity and just consider Hi to match
      // the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy)
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since the Lo and Hi logic isn't
    // extended to work for sizes wider than 128, check early and fall back
    // to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte
        // gets initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class
      // MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since the Lo and Hi logic isn't
      // extended to work for sizes wider than 128, check early and fall back
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling: they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}
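
// Illustrative example (not part of the original source): <2 x i16>
// (32 bits) is an illegal vector type here because it is at most 64 bits
// wide, and without AVX <8 x float> (256 bits) is illegal because it
// exceeds the largest supported vector size; both are passed in memory.
// <4 x float> (128 bits) is always legal, and <8 x float> becomes legal
// once HasAVX is set.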

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen
  // currently, but this code would be much safer if we could mark the
  // argument with 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of
  // the free integer registers. Doing this when there are free integer
  // registers would require more care, as we would have to ensure that the
  // coerced value did not claim the unused register. That would require
  // either reordering the arguments to the function (so that any subsequent
  // inreg values came first), or only doing this optimization when there were
  // no following arguments that might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being
  // passed by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}

/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as
/// a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors;
  // strip them off if present.
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16-byte vector, prefer to pass it.
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
    llvm::Type *EltTy = VT->getElementType();
    unsigned BitWidth = VT->getBitWidth();
    if ((BitWidth >= 128 && BitWidth <= 256) &&
        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}
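
// Illustrative example (not part of the original source): a struct
// containing a single __m128 converts to the IR struct { <4 x float> };
// the wrapper is stripped and the inner <4 x float> (128 bits, float
// elements) is passed directly in an XMM register. An IR type that is not
// a legal 128-to-256-bit vector falls back to <2 x double>.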
The user type specified is known to be at most 128 bits 1624/// in size, and to have passed through X86_64ABIInfo::classify with a successful 1625/// classification that put one of the two halves in the INTEGER class. 1626/// 1627/// It is conservatively correct to return false. 1628static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 1629 unsigned EndBit, ASTContext &Context) { 1630 // If the bits being queried are off the end of the type, there is no user 1631 // data hiding here. This handles analysis of builtins, vectors and other 1632 // types that don't contain interesting padding. 1633 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1634 if (TySize <= StartBit) 1635 return true; 1636 1637 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1638 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1639 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1640 1641 // Check each element to see if the element overlaps with the queried range. 1642 for (unsigned i = 0; i != NumElts; ++i) { 1643 // If the element is after the span we care about, then we're done. 1644 unsigned EltOffset = i*EltSize; 1645 if (EltOffset >= EndBit) break; 1646 1647 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0; 1648 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1649 EndBit-EltOffset, Context)) 1650 return false; 1651 } 1652 // If it overlaps no elements, then it is safe to process as padding. 1653 return true; 1654 } 1655 1656 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1657 const RecordDecl *RD = RT->getDecl(); 1658 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1659 1660 // If this is a C++ record, check the bases first. 1661 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1662 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1663 e = CXXRD->bases_end(); i != e; ++i) { 1664 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1665 "Unexpected base class!"); 1666 const CXXRecordDecl *Base = 1667 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1668 1669 // If the base is after the span we care about, ignore it. 1670 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 1671 if (BaseOffset >= EndBit) continue; 1672 1673 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0; 1674 if (!BitsContainNoUserData(i->getType(), BaseStart, 1675 EndBit-BaseOffset, Context)) 1676 return false; 1677 } 1678 } 1679 1680 // Verify that no field has data that overlaps the region of interest. Yes, 1681 // this could be sped up a lot by being smarter about queried fields; 1682 // however, we're only looking at structs up to 16 bytes, so we don't care 1683 // much. 1684 unsigned idx = 0; 1685 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1686 i != e; ++i, ++idx) { 1687 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1688 1689 // If we found a field after the region we care about, then we're done. 1690 if (FieldOffset >= EndBit) break; 1691 1692 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0; 1693 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1694 Context)) 1695 return false; 1696 } 1697 1698 // If nothing in this record overlapped the area of interest, then we're 1699 // clean.
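// Illustrative example: for struct { int i; double d; }, a query over bits
// [32,64) (the padding after 'i') overlaps no field's user data, so the
// range is padding and we return true.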
1700 return true; 1701 } 1702 1703 return false; 1704} 1705 1706/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1707/// float member at the specified offset. For example, {int,{float}} has a 1708/// float at offset 4. It is conservatively correct for this routine to return 1709/// false. 1710static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1711 const llvm::TargetData &TD) { 1712 // Base case if we find a float. 1713 if (IROffset == 0 && IRType->isFloatTy()) 1714 return true; 1715 1716 // If this is a struct, recurse into the field at the specified offset. 1717 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1718 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1719 unsigned Elt = SL->getElementContainingOffset(IROffset); 1720 IROffset -= SL->getElementOffset(Elt); 1721 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1722 } 1723 1724 // If this is an array, recurse into the field at the specified offset. 1725 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1726 llvm::Type *EltTy = ATy->getElementType(); 1727 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1728 IROffset -= IROffset/EltSize*EltSize; 1729 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1730 } 1731 1732 return false; 1733} 1734 1735 1736/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1737/// low 8 bytes of an XMM register, corresponding to the SSE class. 1738llvm::Type *X86_64ABIInfo:: 1739GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1740 QualType SourceTy, unsigned SourceOffset) const { 1741 // The only three choices we have are double, <2 x float>, and float. We 1742 // pass as float if the last 4 bytes are just padding. This happens for 1743 // structs that contain 3 floats. 1744 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1745 SourceOffset*8+64, getContext())) 1746 return llvm::Type::getFloatTy(getVMContext()); 1747 1748 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1749 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1750 // case. 1751 if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) && 1752 ContainsFloatAtOffset(IRType, IROffset+4, getTargetData())) 1753 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1754 1755 return llvm::Type::getDoubleTy(getVMContext()); 1756} 1757 1758 1759/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1760/// an 8-byte GPR. This means that we either have a scalar or we are talking 1761/// about the high or low part of an up-to-16-byte struct. This routine picks 1762/// the best LLVM IR type to represent this, which may be i64 or anything else 1763/// that the backend will pass in a GPR and that works better (e.g. i8, %foo*, 1764/// etc.). 1765/// 1766/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1767/// the source type. IROffset is an offset in bytes into the LLVM IR type that 1768/// the 8-byte value references. PrefType may be null. 1769/// 1770/// SourceTy is the source-level type for the entire argument. SourceOffset is 1771/// an offset into this that we're processing (which is always either 0 or 8).
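/// For example, for struct { double d; int i; } at SourceOffset 8 this
/// returns i32, because bits [96,128) of the source type are just tail
/// padding (see the struct {double,int} note in the body below).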
1772/// 1773llvm::Type *X86_64ABIInfo:: 1774GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1775 QualType SourceTy, unsigned SourceOffset) const { 1776 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1777 // returning an 8-byte unit starting with it. See if we can safely use it. 1778 if (IROffset == 0) { 1779 // Pointers and i64s always fill the 8-byte unit. 1780 if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64)) 1781 return IRType; 1782 1783 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1784 // goodness in the source type is just tail padding. This is allowed to 1785 // kick in for struct {double,int} on the int, but not on 1786 // struct {double,int,int} because we wouldn't return the second int. We 1787 // have to do this analysis on the source type because we can't depend on 1788 // unions being lowered a specific way etc. 1789 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1790 IRType->isIntegerTy(32)) { 1791 unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth(); 1792 1793 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1794 SourceOffset*8+64, getContext())) 1795 return IRType; 1796 } 1797 } 1798 1799 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1800 // If this is a struct, recurse into the field at the specified offset. 1801 const llvm::StructLayout *SL = getTargetData().getStructLayout(STy); 1802 if (IROffset < SL->getSizeInBytes()) { 1803 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1804 IROffset -= SL->getElementOffset(FieldIdx); 1805 1806 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1807 SourceTy, SourceOffset); 1808 } 1809 } 1810 1811 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1812 llvm::Type *EltTy = ATy->getElementType(); 1813 unsigned EltSize = getTargetData().getTypeAllocSize(EltTy); 1814 unsigned EltOffset = IROffset/EltSize*EltSize; 1815 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1816 SourceOffset); 1817 } 1818 1819 // Okay, we don't have any better idea of what to pass, so we pass it in an 1820 // integer register no larger than the remaining bytes of the struct. 1821 unsigned TySizeInBytes = 1822 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1823 1824 assert(TySizeInBytes != SourceOffset && "Empty field?"); 1825 1826 // It is always safe to classify this as an integer type up to i64 that 1827 // isn't larger than the structure. 1828 return llvm::IntegerType::get(getVMContext(), 1829 std::min(TySizeInBytes-SourceOffset, 8U)*8); 1830} 1831 1832 1833/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 1834/// be used as elements of a two-register pair to pass or return, return a 1835/// first class aggregate to represent them. For example, if the low part of 1836/// a by-value argument should be passed as i32* and the high part as float, 1837/// return {i32*, float}. 1838static llvm::Type * 1839GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 1840 const llvm::TargetData &TD) { 1841 // In order to correctly satisfy the ABI, we need the high part to start 1842 // at offset 8. If the high and low parts we inferred are both 4-byte types 1843 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 1844 // the second element at offset 8.
Check for this: 1845 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 1846 unsigned HiAlign = TD.getABITypeAlignment(Hi); 1847 unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign); 1848 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 1849 1850 // To handle this, we have to increase the size of the low part so that the 1851 // second element will start at an 8 byte offset. We can't increase the size 1852 // of the second element because it might make us access off the end of the 1853 // struct. 1854 if (HiStart != 8) { 1855 // There are only two sorts of types the ABI generation code can produce for 1856 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32. 1857 // Promote these to a larger type. 1858 if (Lo->isFloatTy()) 1859 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 1860 else { 1861 assert(Lo->isIntegerTy() && "Invalid/unknown lo type"); 1862 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 1863 } 1864 } 1865 1866 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL); 1867 1868 1869 // Verify that the second element is at an 8-byte offset. 1870 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 1871 "Invalid x86-64 argument pair!"); 1872 return Result; 1873} 1874 1875ABIArgInfo X86_64ABIInfo:: 1876classifyReturnType(QualType RetTy) const { 1877 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 1878 // classification algorithm. 1879 X86_64ABIInfo::Class Lo, Hi; 1880 classify(RetTy, 0, Lo, Hi); 1881 1882 // Check some invariants. 1883 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1884 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1885 1886 llvm::Type *ResType = 0; 1887 switch (Lo) { 1888 case NoClass: 1889 if (Hi == NoClass) 1890 return ABIArgInfo::getIgnore(); 1891 // If the low part is just padding, it takes no register, leave ResType 1892 // null. 1893 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1894 "Unknown missing lo part"); 1895 break; 1896 1897 case SSEUp: 1898 case X87Up: 1899 llvm_unreachable("Invalid classification for lo word."); 1900 1901 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 1902 // hidden argument. 1903 case Memory: 1904 return getIndirectReturnResult(RetTy); 1905 1906 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 1907 // available register of the sequence %rax, %rdx is used. 1908 case Integer: 1909 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 1910 1911 // If we have a sign or zero extended integer, make sure to return Extend 1912 // so that the parameter gets the right LLVM IR attributes. 1913 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1914 // Treat an enum type as its underlying type. 1915 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1916 RetTy = EnumTy->getDecl()->getIntegerType(); 1917 1918 if (RetTy->isIntegralOrEnumerationType() && 1919 RetTy->isPromotableIntegerType()) 1920 return ABIArgInfo::getExtend(); 1921 } 1922 break; 1923 1924 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 1925 // available SSE register of the sequence %xmm0, %xmm1 is used. 1926 case SSE: 1927 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 1928 break; 1929 1930 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 1931 // returned on the X87 stack in %st0 as 80-bit x87 number. 
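// For example, a C 'long double' return classifies as X87 (with X87Up in the
// high half) and lands here, coming back in %st0.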
1932 case X87: 1933 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 1934 break; 1935 1936 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 1937 // part of the value is returned in %st0 and the imaginary part in 1938 // %st1. 1939 case ComplexX87: 1940 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 1941 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 1942 llvm::Type::getX86_FP80Ty(getVMContext()), 1943 NULL); 1944 break; 1945 } 1946 1947 llvm::Type *HighPart = 0; 1948 switch (Hi) { 1949 // Memory was handled previously and X87 should 1950 // never occur as a hi class. 1951 case Memory: 1952 case X87: 1953 llvm_unreachable("Invalid classification for hi word."); 1954 1955 case ComplexX87: // Previously handled. 1956 case NoClass: 1957 break; 1958 1959 case Integer: 1960 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1961 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1962 return ABIArgInfo::getDirect(HighPart, 8); 1963 break; 1964 case SSE: 1965 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1966 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1967 return ABIArgInfo::getDirect(HighPart, 8); 1968 break; 1969 1970 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 1971 // is passed in the next available eightbyte chunk of the last used 1972 // vector register. 1973 // 1974 // SSEUP should always be preceded by SSE, just widen. 1975 case SSEUp: 1976 assert(Lo == SSE && "Unexpected SSEUp classification."); 1977 ResType = GetByteVectorType(RetTy); 1978 break; 1979 1980 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 1981 // returned together with the previous X87 value in %st0. 1982 case X87Up: 1983 // If X87Up is preceded by X87, we don't need to do 1984 // anything. However, in some cases with unions it may not be 1985 // preceded by X87. In such situations we follow gcc and pass the 1986 // extra bits in an SSE reg. 1987 if (Lo != X87) { 1988 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1989 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1990 return ABIArgInfo::getDirect(HighPart, 8); 1991 } 1992 break; 1993 } 1994 1995 // If a high part was specified, merge it together with the low part. It is 1996 // known to be passed in the high eightbyte of the result. We do this by forming a 1997 // first class struct aggregate with the high and low part: {low, high} 1998 if (HighPart) 1999 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 2000 2001 return ABIArgInfo::getDirect(ResType); 2002} 2003 2004ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2005 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE) 2006 const 2007{ 2008 X86_64ABIInfo::Class Lo, Hi; 2009 classify(Ty, 0, Lo, Hi); 2010 2011 // Check some invariants. 2012 // FIXME: Enforce these by construction. 2013 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2014 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2015 2016 neededInt = 0; 2017 neededSSE = 0; 2018 llvm::Type *ResType = 0; 2019 switch (Lo) { 2020 case NoClass: 2021 if (Hi == NoClass) 2022 return ABIArgInfo::getIgnore(); 2023 // If the low part is just padding, it takes no register, leave ResType 2024 // null. 2025 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2026 "Unknown missing lo part"); 2027 break; 2028 2029 // AMD64-ABI 3.2.3p3: Rule 1.
If the class is MEMORY, pass the argument 2030 // on the stack. 2031 case Memory: 2032 2033 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2034 // COMPLEX_X87, it is passed in memory. 2035 case X87: 2036 case ComplexX87: 2037 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2038 ++neededInt; 2039 return getIndirectResult(Ty, freeIntRegs); 2040 2041 case SSEUp: 2042 case X87Up: 2043 llvm_unreachable("Invalid classification for lo word."); 2044 2045 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2046 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2047 // and %r9 is used. 2048 case Integer: 2049 ++neededInt; 2050 2051 // Pick an 8-byte type based on the preferred type. 2052 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2053 2054 // If we have a sign or zero extended integer, make sure to return Extend 2055 // so that the parameter gets the right LLVM IR attributes. 2056 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2057 // Treat an enum type as its underlying type. 2058 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2059 Ty = EnumTy->getDecl()->getIntegerType(); 2060 2061 if (Ty->isIntegralOrEnumerationType() && 2062 Ty->isPromotableIntegerType()) 2063 return ABIArgInfo::getExtend(); 2064 } 2065 2066 break; 2067 2068 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2069 // available SSE register is used, the registers are taken in the 2070 // order from %xmm0 to %xmm7. 2071 case SSE: { 2072 llvm::Type *IRType = CGT.ConvertType(Ty); 2073 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2074 ++neededSSE; 2075 break; 2076 } 2077 } 2078 2079 llvm::Type *HighPart = 0; 2080 switch (Hi) { 2081 // Memory was handled previously, ComplexX87 and X87 should 2082 // never occur as hi classes, and X87Up must be preceded by X87, 2083 // which is passed in memory. 2084 case Memory: 2085 case X87: 2086 case ComplexX87: 2087 llvm_unreachable("Invalid classification for hi word."); 2088 2089 case NoClass: break; 2090 2091 case Integer: 2092 ++neededInt; 2093 // Pick an 8-byte type based on the preferred type. 2094 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2095 2096 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2097 return ABIArgInfo::getDirect(HighPart, 8); 2098 break; 2099 2100 // X87Up generally doesn't occur here (long double is passed in 2101 // memory), except in situations involving unions. 2102 case X87Up: 2103 case SSE: 2104 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2105 2106 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2107 return ABIArgInfo::getDirect(HighPart, 8); 2108 2109 ++neededSSE; 2110 break; 2111 2112 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2113 // eightbyte is passed in the upper half of the last used SSE 2114 // register. This only happens when 128-bit or 256-bit vectors are passed. 2115 case SSEUp: 2116 assert(Lo == SSE && "Unexpected SSEUp classification"); 2117 ResType = GetByteVectorType(Ty); 2118 break; 2119 } 2120 2121 // If a high part was specified, merge it together with the low part. It is 2122 // known to be passed in the high eightbyte of the result.
We do this by forming a 2123 // first class struct aggregate with the high and low part: {low, high} 2124 if (HighPart) 2125 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 2126 2127 return ABIArgInfo::getDirect(ResType); 2128} 2129 2130void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2131 2132 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2133 2134 // Keep track of the number of assigned registers. 2135 unsigned freeIntRegs = 6, freeSSERegs = 8; 2136 2137 // If the return value is indirect, then the hidden argument is consuming one 2138 // integer register. 2139 if (FI.getReturnInfo().isIndirect()) 2140 --freeIntRegs; 2141 2142 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2143 // get assigned (in left-to-right order) for passing as follows... 2144 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2145 it != ie; ++it) { 2146 unsigned neededInt, neededSSE; 2147 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2148 neededSSE); 2149 2150 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2151 // eightbyte of an argument, the whole argument is passed on the 2152 // stack. If registers have already been assigned for some 2153 // eightbytes of such an argument, the assignments get reverted. 2154 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2155 freeIntRegs -= neededInt; 2156 freeSSERegs -= neededSSE; 2157 } else { 2158 it->info = getIndirectResult(it->type, freeIntRegs); 2159 } 2160 } 2161} 2162 2163static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2164 QualType Ty, 2165 CodeGenFunction &CGF) { 2166 llvm::Value *overflow_arg_area_p = 2167 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2168 llvm::Value *overflow_arg_area = 2169 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2170 2171 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 2172 // 16-byte boundary if the alignment needed by the type exceeds 8 bytes. 2173 // It isn't stated explicitly in the standard, but in practice we use 2174 // alignment greater than 16 where necessary. 2175 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2176 if (Align > 8) { 2177 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2178 llvm::Value *Offset = 2179 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2180 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2181 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2182 CGF.Int64Ty); 2183 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2184 overflow_arg_area = 2185 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2186 overflow_arg_area->getType(), 2187 "overflow_arg_area.align"); 2188 } 2189 2190 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2191 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2192 llvm::Value *Res = 2193 CGF.Builder.CreateBitCast(overflow_arg_area, 2194 llvm::PointerType::getUnqual(LTy)); 2195 2196 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2197 // l->overflow_arg_area + sizeof(type). 2198 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2199 // an 8-byte boundary.
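// For example, a 12-byte type gives SizeInBytes == 12 below, and
// (12 + 7) & ~7 == 16, so overflow_arg_area advances by two eightbytes.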
2200 2201 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2202 llvm::Value *Offset = 2203 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2204 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2205 "overflow_arg_area.next"); 2206 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2207 2208 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2209 return Res; 2210} 2211 2212llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2213 CodeGenFunction &CGF) const { 2214 // Assume that va_list type is correct; should be pointer to LLVM type: 2215 // struct { 2216 // i32 gp_offset; 2217 // i32 fp_offset; 2218 // i8* overflow_arg_area; 2219 // i8* reg_save_area; 2220 // }; 2221 unsigned neededInt, neededSSE; 2222 2223 Ty = CGF.getContext().getCanonicalType(Ty); 2224 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE); 2225 2226 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2227 // in the registers. If not, go to step 7. 2228 if (!neededInt && !neededSSE) 2229 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2230 2231 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2232 // general-purpose registers needed to pass type and num_fp to hold 2233 // the number of floating-point registers needed. 2234 2235 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2236 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2237 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2238 // 2239 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of 2240 // register save space. 2241 2242 llvm::Value *InRegs = 0; 2243 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2244 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2245 if (neededInt) { 2246 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2247 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2248 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2249 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2250 } 2251 2252 if (neededSSE) { 2253 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2254 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2255 llvm::Value *FitsInFP = 2256 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2257 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2258 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2259 } 2260 2261 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2262 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2263 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2264 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2265 2266 // Emit code to load the value if it was passed in registers. 2267 2268 CGF.EmitBlock(InRegBlock); 2269 2270 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2271 // an offset of l->gp_offset and/or l->fp_offset. This may require 2272 // copying to a temporary location in case the parameter is passed 2273 // in different register classes or requires an alignment greater 2274 // than 8 for general-purpose registers and 16 for XMM registers.
2275 // 2276 // FIXME: This really results in shameful code when we end up needing to 2277 // collect arguments from different places; often what should result in a 2278 // simple assembling of a structure from scattered addresses has many more 2279 // loads than necessary. Can we clean this up? 2280 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2281 llvm::Value *RegAddr = 2282 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2283 "reg_save_area"); 2284 if (neededInt && neededSSE) { 2285 // FIXME: Cleanup. 2286 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2287 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2288 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2289 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2290 llvm::Type *TyLo = ST->getElementType(0); 2291 llvm::Type *TyHi = ST->getElementType(1); 2292 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2293 "Unexpected ABI info for mixed regs"); 2294 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2295 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2296 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2297 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2298 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2299 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2300 llvm::Value *V = 2301 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2302 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2303 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2304 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2305 2306 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2307 llvm::PointerType::getUnqual(LTy)); 2308 } else if (neededInt) { 2309 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2310 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2311 llvm::PointerType::getUnqual(LTy)); 2312 } else if (neededSSE == 1) { 2313 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2314 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2315 llvm::PointerType::getUnqual(LTy)); 2316 } else { 2317 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2318 // SSE registers are spaced 16 bytes apart in the register save 2319 // area, so we need to collect the two eightbytes together. 2320 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2321 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2322 llvm::Type *DoubleTy = CGF.DoubleTy; 2323 llvm::Type *DblPtrTy = 2324 llvm::PointerType::getUnqual(DoubleTy); 2325 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2326 DoubleTy, NULL); 2327 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2328 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2329 DblPtrTy)); 2330 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2331 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2332 DblPtrTy)); 2333 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2334 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2335 llvm::PointerType::getUnqual(LTy)); 2336 } 2337 2338 // AMD64-ABI 3.5.7p5: Step 5. Set: 2339 // l->gp_offset = l->gp_offset + num_gp * 8 2340 // l->fp_offset = l->fp_offset + num_fp * 16.
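// E.g. for an argument classified {INTEGER, SSE}, such as
// struct { long l; double d; }, num_gp == 1 and num_fp == 1, so gp_offset
// advances by 8 and fp_offset by 16.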
2341 if (neededInt) { 2342 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2343 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2344 gp_offset_p); 2345 } 2346 if (neededSSE) { 2347 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2348 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2349 fp_offset_p); 2350 } 2351 CGF.EmitBranch(ContBlock); 2352 2353 // Emit code to load the value if it was passed in memory. 2354 2355 CGF.EmitBlock(InMemBlock); 2356 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2357 2358 // Return the appropriate result. 2359 2360 CGF.EmitBlock(ContBlock); 2361 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2362 "vaarg.addr"); 2363 ResAddr->addIncoming(RegAddr, InRegBlock); 2364 ResAddr->addIncoming(MemAddr, InMemBlock); 2365 return ResAddr; 2366} 2367 2368ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2369 2370 if (Ty->isVoidType()) 2371 return ABIArgInfo::getIgnore(); 2372 2373 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2374 Ty = EnumTy->getDecl()->getIntegerType(); 2375 2376 uint64_t Size = getContext().getTypeSize(Ty); 2377 2378 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2379 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2380 RT->getDecl()->hasFlexibleArrayMember()) 2381 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2382 2383 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2384 if (Size == 128 && 2385 getContext().getTargetInfo().getTriple().getOS() 2386 == llvm::Triple::MinGW32) 2387 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2388 Size)); 2389 2390 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2391 // not 1, 2, 4, or 8 bytes, must be passed by reference." 
2392 if (Size <= 64 && 2393 (Size & (Size - 1)) == 0) 2394 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2395 Size)); 2396 2397 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2398 } 2399 2400 if (Ty->isPromotableIntegerType()) 2401 return ABIArgInfo::getExtend(); 2402 2403 return ABIArgInfo::getDirect(); 2404} 2405 2406void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2407 2408 QualType RetTy = FI.getReturnType(); 2409 FI.getReturnInfo() = classify(RetTy); 2410 2411 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2412 it != ie; ++it) 2413 it->info = classify(it->type); 2414} 2415 2416llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2417 CodeGenFunction &CGF) const { 2418 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2419 2420 CGBuilderTy &Builder = CGF.Builder; 2421 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2422 "ap"); 2423 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2424 llvm::Type *PTy = 2425 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2426 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2427 2428 uint64_t Offset = 2429 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2430 llvm::Value *NextAddr = 2431 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2432 "ap.next"); 2433 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2434 2435 return AddrTyped; 2436} 2437 2438// PowerPC-32 2439 2440namespace { 2441class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2442public: 2443 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2444 2445 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2446 // This is recovered from gcc output. 2447 return 1; // r1 is the dedicated stack pointer 2448 } 2449 2450 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2451 llvm::Value *Address) const; 2452}; 2453 2454} 2455 2456bool 2457PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2458 llvm::Value *Address) const { 2459 // This is calculated from the LLVM and GCC tables and verified 2460 // against gcc output. AFAIK all ABIs use the same encoding. 
2461 2462 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2463 2464 llvm::IntegerType *i8 = CGF.Int8Ty; 2465 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2466 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2467 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2468 2469 // 0-31: r0-31, the 4-byte general-purpose registers 2470 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2471 2472 // 32-63: fp0-31, the 8-byte floating-point registers 2473 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2474 2475 // 64-76 are various 4-byte special-purpose registers: 2476 // 64: mq 2477 // 65: lr 2478 // 66: ctr 2479 // 67: ap 2480 // 68-75 cr0-7 2481 // 76: xer 2482 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2483 2484 // 77-108: v0-31, the 16-byte vector registers 2485 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2486 2487 // 109: vrsave 2488 // 110: vscr 2489 // 111: spe_acc 2490 // 112: spefscr 2491 // 113: sfp 2492 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2493 2494 return false; 2495} 2496 2497// PowerPC-64 2498 2499namespace { 2500class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2501public: 2502 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2503 2504 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2505 // This is recovered from gcc output. 2506 return 1; // r1 is the dedicated stack pointer 2507 } 2508 2509 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2510 llvm::Value *Address) const; 2511}; 2512 2513} 2514 2515bool 2516PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2517 llvm::Value *Address) const { 2518 // This is calculated from the LLVM and GCC tables and verified 2519 // against gcc output. AFAIK all ABIs use the same encoding. 
2520 2521 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2522 2523 llvm::IntegerType *i8 = CGF.Int8Ty; 2524 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2525 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2526 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2527 2528 // 0-31: r0-31, the 8-byte general-purpose registers 2529 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2530 2531 // 32-63: fp0-31, the 8-byte floating-point registers 2532 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2533 2534 // 64-76 are various 4-byte special-purpose registers: 2535 // 64: mq 2536 // 65: lr 2537 // 66: ctr 2538 // 67: ap 2539 // 68-75 cr0-7 2540 // 76: xer 2541 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2542 2543 // 77-108: v0-31, the 16-byte vector registers 2544 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2545 2546 // 109: vrsave 2547 // 110: vscr 2548 // 111: spe_acc 2549 // 112: spefscr 2550 // 113: sfp 2551 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2552 2553 return false; 2554} 2555 2556//===----------------------------------------------------------------------===// 2557// ARM ABI Implementation 2558//===----------------------------------------------------------------------===// 2559 2560namespace { 2561 2562class ARMABIInfo : public ABIInfo { 2563public: 2564 enum ABIKind { 2565 APCS = 0, 2566 AAPCS = 1, 2567 AAPCS_VFP 2568 }; 2569 2570private: 2571 ABIKind Kind; 2572 2573public: 2574 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2575 2576 bool isEABI() const { 2577 StringRef Env = 2578 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2579 return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi"); 2580 } 2581 2582private: 2583 ABIKind getABIKind() const { return Kind; } 2584 2585 ABIArgInfo classifyReturnType(QualType RetTy) const; 2586 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2587 2588 virtual void computeInfo(CGFunctionInfo &FI) const; 2589 2590 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2591 CodeGenFunction &CGF) const; 2592}; 2593 2594class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2595public: 2596 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2597 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2598 2599 const ARMABIInfo &getABIInfo() const { 2600 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2601 } 2602 2603 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2604 return 13; 2605 } 2606 2607 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2608 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2609 } 2610 2611 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2612 llvm::Value *Address) const { 2613 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2614 2615 // 0-15 are the 16 integer registers. 2616 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 2617 return false; 2618 } 2619 2620 unsigned getSizeOfUnwindException() const { 2621 if (getABIInfo().isEABI()) return 88; 2622 return TargetCodeGenInfo::getSizeOfUnwindException(); 2623 } 2624}; 2625 2626} 2627 2628void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 2629 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2630 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2631 it != ie; ++it) 2632 it->info = classifyArgumentType(it->type); 2633 2634 // Always honor user-specified calling convention. 
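// (A convention other than plain C can only have come from an explicit
// request, e.g. __attribute__((pcs("aapcs"))), so leave it untouched.)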
2635 if (FI.getCallingConvention() != llvm::CallingConv::C) 2636 return; 2637 2638 // Otherwise, pick the default calling convention for this ABI. 2639 llvm::CallingConv::ID DefaultCC; 2640 if (isEABI()) 2641 DefaultCC = llvm::CallingConv::ARM_AAPCS; 2642 else 2643 DefaultCC = llvm::CallingConv::ARM_APCS; 2644 2645 // If the user did not explicitly ask for a specific calling convention 2646 // (e.g. via the pcs attribute), set the effective calling convention if it 2647 // differs from the ABI default. 2648 switch (getABIKind()) { 2649 case APCS: 2650 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2651 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2652 break; 2653 case AAPCS: 2654 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2655 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2656 break; 2657 case AAPCS_VFP: 2658 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP) 2659 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2660 break; 2661 } 2662} 2663 2664/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 2665/// aggregate. If HAMembers is non-null, the number of base elements 2666/// contained in the type is returned through it; this is used for the 2667/// recursive calls that check aggregate component types. 2668static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 2669 ASTContext &Context, 2670 uint64_t *HAMembers = 0) { 2671 uint64_t Members = 0; 2672 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2673 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 2674 return false; 2675 Members *= AT->getSize().getZExtValue(); 2676 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2677 const RecordDecl *RD = RT->getDecl(); 2678 if (RD->hasFlexibleArrayMember()) 2679 return false; 2680 2681 Members = 0; 2682 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2683 i != e; ++i) { 2684 const FieldDecl *FD = *i; 2685 uint64_t FldMembers; 2686 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 2687 return false; 2688 2689 Members = (RD->isUnion() ? 2690 std::max(Members, FldMembers) : Members + FldMembers); 2691 } 2692 } else { 2693 Members = 1; 2694 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2695 Members = 2; 2696 Ty = CT->getElementType(); 2697 } 2698 2699 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 2700 // double, or 64-bit or 128-bit vectors. 2701 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2702 if (BT->getKind() != BuiltinType::Float && 2703 BT->getKind() != BuiltinType::Double && 2704 BT->getKind() != BuiltinType::LongDouble) 2705 return false; 2706 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2707 unsigned VecSize = Context.getTypeSize(VT); 2708 if (VecSize != 64 && VecSize != 128) 2709 return false; 2710 } else { 2711 return false; 2712 } 2713 2714 // The base type must be the same for all members. Vector types of the 2715 // same total size are treated as being equivalent here. 2716 const Type *TyPtr = Ty.getTypePtr(); 2717 if (!Base) 2718 Base = TyPtr; 2719 if (Base != TyPtr && 2720 (!Base->isVectorType() || !TyPtr->isVectorType() || 2721 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 2722 return false; 2723 } 2724 2725 // Homogeneous Aggregates can have at most 4 members of the base type.
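// For example, struct { float x, y, z, w; } is a homogeneous aggregate of
// four floats and qualifies, while struct { double d[5]; } has five members
// of the base type and does not.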
2726 if (HAMembers) 2727 *HAMembers = Members; 2728 2729 return (Members > 0 && Members <= 4); 2730} 2731 2732ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2733 if (!isAggregateTypeForABI(Ty)) { 2734 // Treat an enum type as its underlying type. 2735 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2736 Ty = EnumTy->getDecl()->getIntegerType(); 2737 2738 return (Ty->isPromotableIntegerType() ? 2739 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2740 } 2741 2742 // Ignore empty records. 2743 if (isEmptyRecord(getContext(), Ty, true)) 2744 return ABIArgInfo::getIgnore(); 2745 2746 // Structures with either a non-trivial destructor or a non-trivial 2747 // copy constructor are always indirect. 2748 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2749 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2750 2751 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 2752 // Homogeneous Aggregates need to be expanded. 2753 const Type *Base = 0; 2754 if (isHomogeneousAggregate(Ty, Base, getContext())) { 2755 assert(Base && "Base class should be set for homogeneous aggregate"); 2756 return ABIArgInfo::getExpand(); 2757 } 2758 } 2759 2760 // Turn on byval for APCS and AAPCS. 2761 // FIXME: turn on byval for AAPCS_VFP for performance. 2762 if (getABIKind() == ARMABIInfo::APCS || getABIKind() == ARMABIInfo::AAPCS) { 2763 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) || 2764 getContext().getTypeAlign(Ty) > 64) { 2765 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 2766 } 2767 } 2768 2769 // Otherwise, pass by coercing to a structure of the appropriate size. 2770 llvm::Type* ElemTy; 2771 unsigned SizeRegs; 2772 // FIXME: Try to match the types of the arguments more accurately where 2773 // we can. 2774 if (getContext().getTypeAlign(Ty) <= 32) { 2775 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 2776 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 2777 } else { 2778 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 2779 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 2780 } 2781 2782 llvm::Type *STy = 2783 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 2784 return ABIArgInfo::getDirect(STy); 2785} 2786 2787static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 2788 llvm::LLVMContext &VMContext) { 2789 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 2790 // is called integer-like if its size is less than or equal to one word, and 2791 // the offset of each of its addressable sub-fields is zero. 2792 2793 uint64_t Size = Context.getTypeSize(Ty); 2794 2795 // Check that the type fits in a word. 2796 if (Size > 32) 2797 return false; 2798 2799 // FIXME: Handle vector types! 2800 if (Ty->isVectorType()) 2801 return false; 2802 2803 // Float types are never treated as "integer like". 2804 if (Ty->isRealFloatingType()) 2805 return false; 2806 2807 // If this is a builtin or pointer type then it is ok. 2808 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 2809 return true; 2810 2811 // Small complex integer types are "integer like". 2812 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 2813 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 2814 2815 // Single element and zero sized arrays should be allowed, by the definition 2816 // above, but they are not. 2817 2818 // Otherwise, it must be a record type. 2819 const RecordType *RT = Ty->getAs<RecordType>(); 2820 if (!RT) return false; 2821 2822 // Ignore records with flexible arrays. 
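// (E.g. struct { int len; char data[]; } is never integer-like.)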
2823 const RecordDecl *RD = RT->getDecl(); 2824 if (RD->hasFlexibleArrayMember()) 2825 return false; 2826 2827 // Check that all sub-fields are at offset 0, and are themselves "integer 2828 // like". 2829 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2830 2831 bool HadField = false; 2832 unsigned idx = 0; 2833 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2834 i != e; ++i, ++idx) { 2835 const FieldDecl *FD = *i; 2836 2837 // Bit-fields are not addressable, so we only need to verify that they are 2838 // "integer like". We still have to disallow a subsequent non-bitfield, for example: 2839 // struct { int : 0; int x; } 2840 // is non-integer like according to gcc. 2841 if (FD->isBitField()) { 2842 if (!RD->isUnion()) 2843 HadField = true; 2844 2845 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2846 return false; 2847 2848 continue; 2849 } 2850 2851 // Check if this field is at offset 0. 2852 if (Layout.getFieldOffset(idx) != 0) 2853 return false; 2854 2855 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2856 return false; 2857 2858 // Allow at most one field in a structure. This doesn't match the 2859 // wording above, but follows gcc in situations with a field following an 2860 // empty structure. 2861 if (!RD->isUnion()) { 2862 if (HadField) 2863 return false; 2864 2865 HadField = true; 2866 } 2867 } 2868 2869 return true; 2870} 2871 2872ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 2873 if (RetTy->isVoidType()) 2874 return ABIArgInfo::getIgnore(); 2875 2876 // Large vector types should be returned via memory. 2877 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 2878 return ABIArgInfo::getIndirect(0); 2879 2880 if (!isAggregateTypeForABI(RetTy)) { 2881 // Treat an enum type as its underlying type. 2882 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2883 RetTy = EnumTy->getDecl()->getIntegerType(); 2884 2885 return (RetTy->isPromotableIntegerType() ? 2886 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2887 } 2888 2889 // Structures with either a non-trivial destructor or a non-trivial 2890 // copy constructor are always indirect. 2891 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 2892 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2893 2894 // Are we following APCS? 2895 if (getABIKind() == APCS) { 2896 if (isEmptyRecord(getContext(), RetTy, false)) 2897 return ABIArgInfo::getIgnore(); 2898 2899 // Complex types are all returned as packed integers. 2900 // 2901 // FIXME: Consider using 2 x vector types if the back end handles them 2902 // correctly. 2903 if (RetTy->isAnyComplexType()) 2904 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2905 getContext().getTypeSize(RetTy))); 2906 2907 // Integer-like structures are returned in r0. 2908 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 2909 // Return in the smallest viable integer type. 2910 uint64_t Size = getContext().getTypeSize(RetTy); 2911 if (Size <= 8) 2912 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2913 if (Size <= 16) 2914 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2915 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2916 } 2917 2918 // Otherwise return in memory. 2919 return ABIArgInfo::getIndirect(0); 2920 } 2921 2922 // Otherwise this is an AAPCS variant.
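// Under AAPCS, apart from the VFP homogeneous-aggregate case below,
// aggregates of at most 4 bytes come back in r0 and anything larger is
// returned indirectly.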
2923 2924 if (isEmptyRecord(getContext(), RetTy, true)) 2925 return ABIArgInfo::getIgnore(); 2926 2927 // Check for homogeneous aggregates with AAPCS-VFP. 2928 if (getABIKind() == AAPCS_VFP) { 2929 const Type *Base = 0; 2930 if (isHomogeneousAggregate(RetTy, Base, getContext())) { 2931 assert(Base && "Base class should be set for homogeneous aggregate"); 2932 // Homogeneous Aggregates are returned directly. 2933 return ABIArgInfo::getDirect(); 2934 } 2935 } 2936 2937 // Aggregates <= 4 bytes are returned in r0; other aggregates 2938 // are returned indirectly. 2939 uint64_t Size = getContext().getTypeSize(RetTy); 2940 if (Size <= 32) { 2941 // Return in the smallest viable integer type. 2942 if (Size <= 8) 2943 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2944 if (Size <= 16) 2945 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2946 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2947 } 2948 2949 return ABIArgInfo::getIndirect(0); 2950} 2951 2952llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2953 CodeGenFunction &CGF) const { 2954 llvm::Type *BP = CGF.Int8PtrTy; 2955 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2956 2957 CGBuilderTy &Builder = CGF.Builder; 2958 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2959 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2960 // Handle address alignment for type alignment > 32 bits 2961 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 2962 if (TyAlign > 4) { 2963 assert((TyAlign & (TyAlign - 1)) == 0 && 2964 "Alignment is not power of 2!"); 2965 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 2966 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 2967 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 2968 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2969 } 2970 llvm::Type *PTy = 2971 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2972 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2973 2974 uint64_t Offset = 2975 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 2976 llvm::Value *NextAddr = 2977 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2978 "ap.next"); 2979 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2980 2981 return AddrTyped; 2982} 2983 2984//===----------------------------------------------------------------------===// 2985// NVPTX ABI Implementation 2986//===----------------------------------------------------------------------===// 2987 2988namespace { 2989 2990class NVPTXABIInfo : public ABIInfo { 2991public: 2992 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2993 2994 ABIArgInfo classifyReturnType(QualType RetTy) const; 2995 ABIArgInfo classifyArgumentType(QualType Ty) const; 2996 2997 virtual void computeInfo(CGFunctionInfo &FI) const; 2998 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2999 CodeGenFunction &CFG) const; 3000}; 3001 3002class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 3003public: 3004 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 3005 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 3006 3007 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3008 CodeGen::CodeGenModule &M) const; 3009}; 3010 3011ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 3012 if (RetTy->isVoidType()) 3013 return ABIArgInfo::getIgnore(); 3014 if (isAggregateTypeForABI(RetTy)) 3015 return ABIArgInfo::getIndirect(0); 
3016 return ABIArgInfo::getDirect(); 3017} 3018 3019ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 3020 if (isAggregateTypeForABI(Ty)) 3021 return ABIArgInfo::getIndirect(0); 3022 3023 return ABIArgInfo::getDirect(); 3024} 3025 3026void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 3027 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3028 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3029 it != ie; ++it) 3030 it->info = classifyArgumentType(it->type); 3031 3032 // Always honor user-specified calling convention. 3033 if (FI.getCallingConvention() != llvm::CallingConv::C) 3034 return; 3035 3036 // Otherwise, pick the default calling convention for this ABI. 3037 // We're still using the PTX_Kernel/PTX_Device calling conventions here, 3038 // but we should switch to NVVM metadata later on. 3039 llvm::CallingConv::ID DefaultCC; 3040 const LangOptions &LangOpts = getContext().getLangOpts(); 3041 if (LangOpts.OpenCL || LangOpts.CUDA) { 3042 // If we are in OpenCL or CUDA mode, then default to device functions. 3043 DefaultCC = llvm::CallingConv::PTX_Device; 3044 } else { 3045 // If we are in standard C/C++ mode, use the triple to decide on the default. 3046 StringRef Env = 3047 getContext().getTargetInfo().getTriple().getEnvironmentName(); 3048 if (Env == "device") 3049 DefaultCC = llvm::CallingConv::PTX_Device; 3050 else 3051 DefaultCC = llvm::CallingConv::PTX_Kernel; 3052 } 3053 FI.setEffectiveCallingConvention(DefaultCC); 3054 3055} 3056 3057llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3058 CodeGenFunction &CFG) const { 3059 llvm_unreachable("NVPTX does not support varargs"); 3060} 3061 3062void NVPTXTargetCodeGenInfo:: 3063SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3064 CodeGen::CodeGenModule &M) const { 3065 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3066 if (!FD) return; 3067 3068 llvm::Function *F = cast<llvm::Function>(GV); 3069 3070 // Perform special handling in OpenCL mode. 3071 if (M.getLangOpts().OpenCL) { 3072 // Use OpenCL function attributes to set proper calling conventions. 3073 // By default, all functions are device functions. 3074 if (FD->hasAttr<OpenCLKernelAttr>()) { 3075 // OpenCL __kernel functions get a kernel calling convention. 3076 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3077 // And kernel functions are not subject to inlining. 3078 F->addFnAttr(llvm::Attribute::NoInline); 3079 } 3080 } 3081 3082 // Perform special handling in CUDA mode. 3083 if (M.getLangOpts().CUDA) { 3084 // CUDA __global__ functions get a kernel calling convention. Since 3085 // __global__ functions cannot be called from the device, we do not 3086 // need to set the noinline attribute.
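// (CUDAGlobalAttr corresponds to '__global__', so e.g.
// '__global__ void kernel(float *p)' gets the PTX_Kernel convention.)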
3087 if (FD->getAttr<CUDAGlobalAttr>()) 3088 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3089 } 3090} 3091 3092} 3093 3094//===----------------------------------------------------------------------===// 3095// MBlaze ABI Implementation 3096//===----------------------------------------------------------------------===// 3097 3098namespace { 3099 3100class MBlazeABIInfo : public ABIInfo { 3101public: 3102 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3103 3104 bool isPromotableIntegerType(QualType Ty) const; 3105 3106 ABIArgInfo classifyReturnType(QualType RetTy) const; 3107 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3108 3109 virtual void computeInfo(CGFunctionInfo &FI) const { 3110 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3111 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3112 it != ie; ++it) 3113 it->info = classifyArgumentType(it->type); 3114 } 3115 3116 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3117 CodeGenFunction &CGF) const; 3118}; 3119 3120class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 3121public: 3122 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 3123 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 3124 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3125 CodeGen::CodeGenModule &M) const; 3126}; 3127 3128} 3129 3130bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 3131 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 3132 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3133 switch (BT->getKind()) { 3134 case BuiltinType::Bool: 3135 case BuiltinType::Char_S: 3136 case BuiltinType::Char_U: 3137 case BuiltinType::SChar: 3138 case BuiltinType::UChar: 3139 case BuiltinType::Short: 3140 case BuiltinType::UShort: 3141 return true; 3142 default: 3143 return false; 3144 } 3145 return false; 3146} 3147 3148llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3149 CodeGenFunction &CGF) const { 3150 // FIXME: Implement 3151 return 0; 3152} 3153 3154 3155ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 3156 if (RetTy->isVoidType()) 3157 return ABIArgInfo::getIgnore(); 3158 if (isAggregateTypeForABI(RetTy)) 3159 return ABIArgInfo::getIndirect(0); 3160 3161 return (isPromotableIntegerType(RetTy) ? 3162 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3163} 3164 3165ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 3166 if (isAggregateTypeForABI(Ty)) 3167 return ABIArgInfo::getIndirect(0); 3168 3169 return (isPromotableIntegerType(Ty) ? 3170 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3171} 3172 3173void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3174 llvm::GlobalValue *GV, 3175 CodeGen::CodeGenModule &M) 3176 const { 3177 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3178 if (!FD) return; 3179 3180 llvm::CallingConv::ID CC = llvm::CallingConv::C; 3181 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 3182 CC = llvm::CallingConv::MBLAZE_INTR; 3183 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 3184 CC = llvm::CallingConv::MBLAZE_SVOL; 3185 3186 if (CC != llvm::CallingConv::C) { 3187 // Handle 'interrupt_handler' attribute: 3188 llvm::Function *F = cast<llvm::Function>(GV); 3189 3190 // Step 1: Set ISR calling convention. 3191 F->setCallingConv(CC); 3192 3193 // Step 2: Add attributes goodness. 3194 F->addFnAttr(llvm::Attribute::NoInline); 3195 } 3196 3197 // Step 3: Emit _interrupt_handler alias. 
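// (The alias exposes the handler under the fixed name '_interrupt_handler';
// this presumably mirrors what the MicroBlaze gcc toolchain expects.)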
3198   if (CC == llvm::CallingConv::MBLAZE_INTR)
3199     new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
3200                           "_interrupt_handler", GV, &M.getModule());
3201 }
3202
3203
3204 //===----------------------------------------------------------------------===//
3205 // MSP430 ABI Implementation
3206 //===----------------------------------------------------------------------===//
3207
3208 namespace {
3209
3210 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
3211 public:
3212   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
3213     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
3214   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3215                            CodeGen::CodeGenModule &M) const;
3216 };
3217
3218 }
3219
3220 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
3221                                                   llvm::GlobalValue *GV,
3222                                                   CodeGen::CodeGenModule &M) const {
3223   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
3224     if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
3225       // Handle 'interrupt' attribute:
3226       llvm::Function *F = cast<llvm::Function>(GV);
3227
3228       // Step 1: Set ISR calling convention.
3229       F->setCallingConv(llvm::CallingConv::MSP430_INTR);
3230
3231       // Step 2: Mark the function as not inlinable.
3232       F->addFnAttr(llvm::Attribute::NoInline);
3233
3234       // Step 3: Emit ISR vector alias.
3235       unsigned Num = attr->getNumber() + 0xffe0;
3236       new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
3237                             "vector_" + Twine::utohexstr(Num),
3238                             GV, &M.getModule());
3239     }
3240   }
3241 }
3242
3243 //===----------------------------------------------------------------------===//
3244 // MIPS ABI Implementation.  This works for both little-endian and
3245 // big-endian variants.
3246 //===----------------------------------------------------------------------===//
3247
3248 namespace {
3249 class MipsABIInfo : public ABIInfo {
3250   bool IsO32;
3251   unsigned MinABIStackAlignInBytes, StackAlignInBytes;
3252   void CoerceToIntArgs(uint64_t TySize,
3253                        SmallVector<llvm::Type*, 8> &ArgList) const;
3254   llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
3255   llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
3256   llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
3257 public:
3258   MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
3259     ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
3260     StackAlignInBytes(IsO32 ? 8 : 16) {}
3261
3262   ABIArgInfo classifyReturnType(QualType RetTy) const;
3263   ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
3264   virtual void computeInfo(CGFunctionInfo &FI) const;
3265   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3266                                  CodeGenFunction &CGF) const;
3267 };
3268
3269 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
3270   unsigned SizeOfUnwindException;
3271 public:
3272   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
3273     : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
3274       SizeOfUnwindException(IsO32 ? 24 : 32) {}
3275
3276   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
3277     return 29;
3278   }
3279
3280   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3281                                llvm::Value *Address) const;
3282
3283   unsigned getSizeOfUnwindException() const {
3284     return SizeOfUnwindException;
3285   }
3286 };
3287 }
3288
3289 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
3290                                   SmallVector<llvm::Type*, 8> &ArgList) const {
3291   llvm::IntegerType *IntTy =
3292     llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
3293
3294   // TySize is in bits: add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy.
3295   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
3296     ArgList.push_back(IntTy);
3297
3298   // If necessary, add one more integer type to ArgList.
3299   unsigned R = TySize % (MinABIStackAlignInBytes * 8);
3300
3301   if (R)
3302     ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
3303 }
3304
3305 // In N32/64, an aligned double-precision floating-point field is passed in
3306 // a register.
3307 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
3308   SmallVector<llvm::Type*, 8> ArgList, IntArgList;
3309
3310   if (IsO32) {
3311     CoerceToIntArgs(TySize, ArgList);
3312     return llvm::StructType::get(getVMContext(), ArgList);
3313   }
3314
3315   if (Ty->isComplexType())
3316     return CGT.ConvertType(Ty);
3317
3318   const RecordType *RT = Ty->getAs<RecordType>();
3319
3320   // Unions/vectors are passed in integer registers.
3321   if (!RT || !RT->isStructureOrClassType()) {
3322     CoerceToIntArgs(TySize, ArgList);
3323     return llvm::StructType::get(getVMContext(), ArgList);
3324   }
3325
3326   const RecordDecl *RD = RT->getDecl();
3327   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3328   assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
3329
3330   uint64_t LastOffset = 0;
3331   unsigned idx = 0;
3332   llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
3333
3334   // Iterate over fields in the struct/class and check if there are any aligned
3335   // double fields.
3336   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3337        i != e; ++i, ++idx) {
3338     const QualType Ty = i->getType();
3339     const BuiltinType *BT = Ty->getAs<BuiltinType>();
3340
3341     if (!BT || BT->getKind() != BuiltinType::Double)
3342       continue;
3343
3344     uint64_t Offset = Layout.getFieldOffset(idx);
3345     if (Offset % 64) // Ignore doubles that are not aligned.
3346       continue;
3347
3348     // Add ((Offset - LastOffset) / 64) args of type i64.
3349     for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
3350       ArgList.push_back(I64);
3351
3352     // Add double type.
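    // (Worked example, N32/N64: for "struct { int i; double d; }" with
    // TySize == 128 the double sits at offset 64, so one i64 is emitted for
    // the leading bytes and the resulting LLVM type is { i64, double }.)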
3353     ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
3354     LastOffset = Offset + 64;
3355   }
3356
3357   CoerceToIntArgs(TySize - LastOffset, IntArgList);
3358   ArgList.append(IntArgList.begin(), IntArgList.end());
3359
3360   return llvm::StructType::get(getVMContext(), ArgList);
3361 }
3362
3363 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
3364   assert((Offset % MinABIStackAlignInBytes) == 0);
3365
3366   if ((Align - 1) & Offset)
3367     return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
3368
3369   return 0;
3370 }
3371
3372 ABIArgInfo
3373 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
3374   uint64_t OrigOffset = Offset;
3375   uint64_t TySize = getContext().getTypeSize(Ty);
3376   uint64_t Align = getContext().getTypeAlign(Ty) / 8;
3377
3378   Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
3379                    (uint64_t)StackAlignInBytes);
3380   Offset = llvm::RoundUpToAlignment(Offset, Align);
3381   Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
3382
3383   if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
3384     // Ignore empty aggregates.
3385     if (TySize == 0)
3386       return ABIArgInfo::getIgnore();
3387
3388     // Records with non-trivial destructors/constructors should not be passed
3389     // by value.
3390     if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
3391       Offset = OrigOffset + MinABIStackAlignInBytes;
3392       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3393     }
3394
3395     // If we have reached here, aggregates are passed directly by coercing to
3396     // another structure type.  Padding is inserted if the offset of the
3397     // aggregate is unaligned.
3398     return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
3399                                  getPaddingType(Align, OrigOffset));
3400   }
3401
3402   // Treat an enum type as its underlying type.
3403   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3404     Ty = EnumTy->getDecl()->getIntegerType();
3405
3406   if (Ty->isPromotableIntegerType())
3407     return ABIArgInfo::getExtend();
3408
3409   return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset));
3410 }
3411
3412 llvm::Type*
3413 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
3414   const RecordType *RT = RetTy->getAs<RecordType>();
3415   SmallVector<llvm::Type*, 8> RTList;
3416
3417   if (RT && RT->isStructureOrClassType()) {
3418     const RecordDecl *RD = RT->getDecl();
3419     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3420     unsigned FieldCnt = Layout.getFieldCount();
3421
3422     // N32/64 returns structs/classes in floating-point registers if the
3423     // following conditions are met:
3424     // 1. The size of the struct/class is no larger than 128 bits.
3425     // 2. The struct/class has one or two fields, all of which are
3426     //    floating-point types.
3427     // 3. The offset of the first field is zero (this follows what gcc does).
3428     //
3429     // Any other composite results are returned in integer registers.
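    // (Illustration: "struct { float f; double d; }" meets all three
    // conditions and is returned as { float, double } in floating-point
    // registers, while "struct { int i; float f; }" fails condition 2 and
    // falls through to the integer coercion below.)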
3430 // 3431 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3432 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3433 for (; b != e; ++b) { 3434 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 3435 3436 if (!BT || !BT->isFloatingPoint()) 3437 break; 3438 3439 RTList.push_back(CGT.ConvertType(b->getType())); 3440 } 3441 3442 if (b == e) 3443 return llvm::StructType::get(getVMContext(), RTList, 3444 RD->hasAttr<PackedAttr>()); 3445 3446 RTList.clear(); 3447 } 3448 } 3449 3450 CoerceToIntArgs(Size, RTList); 3451 return llvm::StructType::get(getVMContext(), RTList); 3452} 3453 3454ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3455 uint64_t Size = getContext().getTypeSize(RetTy); 3456 3457 if (RetTy->isVoidType() || Size == 0) 3458 return ABIArgInfo::getIgnore(); 3459 3460 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 3461 if (Size <= 128) { 3462 if (RetTy->isAnyComplexType()) 3463 return ABIArgInfo::getDirect(); 3464 3465 // O32 returns integer vectors in registers. 3466 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 3467 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3468 3469 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3470 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3471 } 3472 3473 return ABIArgInfo::getIndirect(0); 3474 } 3475 3476 // Treat an enum type as its underlying type. 3477 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3478 RetTy = EnumTy->getDecl()->getIntegerType(); 3479 3480 return (RetTy->isPromotableIntegerType() ? 3481 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3482} 3483 3484void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3485 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3486 RetInfo = classifyReturnType(FI.getReturnType()); 3487 3488 // Check if a pointer to an aggregate is passed as a hidden argument. 3489 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 3490 3491 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3492 it != ie; ++it) 3493 it->info = classifyArgumentType(it->type, Offset); 3494} 3495 3496llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3497 CodeGenFunction &CGF) const { 3498 llvm::Type *BP = CGF.Int8PtrTy; 3499 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3500 3501 CGBuilderTy &Builder = CGF.Builder; 3502 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3503 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3504 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8; 3505 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3506 llvm::Value *AddrTyped; 3507 unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0); 3508 llvm::IntegerType *IntTy = (PtrWidth == 32) ? 
3509
3510   if (TypeAlign > MinABIStackAlignInBytes) {
3511     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
3512     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
3513     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
3514     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
3515     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
3516     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
3517   }
3518   else
3519     AddrTyped = Builder.CreateBitCast(Addr, PTy);
3520
3521   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
3522   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
3523   uint64_t Offset =
3524     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
3525   llvm::Value *NextAddr =
3526     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
3527                       "ap.next");
3528   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3529
3530   return AddrTyped;
3531 }
3532
3533 bool
3534 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3535                                                llvm::Value *Address) const {
3536   // This information comes from gcc's implementation, which seems to be
3537   // about as canonical as it gets.
3538
3539   // Everything on MIPS is 4 bytes.  Double-precision FP registers
3540   // are aliased to pairs of single-precision FP registers.
3541   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3542
3543   // 0-31 are the general purpose registers, $0 - $31.
3544   // 32-63 are the floating-point registers, $f0 - $f31.
3545   // 64 and 65 are the multiply/divide registers, $hi and $lo.
3546   // 66 is the (notional, I think) register for signal-handler return.
3547   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
3548
3549   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
3550   // They are one bit wide and ignored here.
3551
3552   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
3553   // (coprocessor 1 is the FP unit)
3554   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
3555   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
3556   // 176-181 are the DSP accumulator registers.
3557   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
3558   return false;
3559 }
3560
3561 //===----------------------------------------------------------------------===//
3562 // TCE ABI Implementation (see http://tce.cs.tut.fi).  Uses mostly the defaults.
3563 // Currently subclassed only to implement custom OpenCL C function attribute
3564 // handling.
3565 //===----------------------------------------------------------------------===//
3566
3567 namespace {
3568
3569 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3570 public:
3571   TCETargetCodeGenInfo(CodeGenTypes &CGT)
3572     : DefaultTargetCodeGenInfo(CGT) {}
3573
3574   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3575                                    CodeGen::CodeGenModule &M) const;
3576 };
3577
3578 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
3579                                                llvm::GlobalValue *GV,
3580                                                CodeGen::CodeGenModule &M) const {
3581   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3582   if (!FD) return;
3583
3584   llvm::Function *F = cast<llvm::Function>(GV);
3585
3586   if (M.getLangOpts().OpenCL) {
3587     if (FD->hasAttr<OpenCLKernelAttr>()) {
3588       // OpenCL C kernel functions are not subject to inlining.
3589       F->addFnAttr(llvm::Attribute::NoInline);
3590
3591       if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
3592
3593         // Convert the reqd_work_group_size() attributes to metadata.
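        // (The emitted operand on !opencl.kernel_wg_size_info has the shape
        // !{ <kernel function>, i32 X, i32 Y, i32 Z, i1 true }, as built
        // below.)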
3594         llvm::LLVMContext &Context = F->getContext();
3595         llvm::NamedMDNode *OpenCLMetadata =
3596           M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
3597
3598         SmallVector<llvm::Value*, 5> Operands;
3599         Operands.push_back(F);
3600
3601         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3602                            llvm::APInt(32,
3603                            FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
3604         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3605                            llvm::APInt(32,
3606                            FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
3607         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3608                            llvm::APInt(32,
3609                            FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
3610
3611         // Add a boolean constant operand for "required" (true) or "hint"
3612         // (false) for implementing the work_group_size_hint attr later.
3613         // Currently always true as the hint is not yet implemented.
3614         Operands.push_back(llvm::ConstantInt::getTrue(Context));
3615         OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
3616       }
3617     }
3618   }
3619 }
3620
3621 }
3622
3623 //===----------------------------------------------------------------------===//
3624 // Hexagon ABI Implementation
3625 //===----------------------------------------------------------------------===//
3626
3627 namespace {
3628
3629 class HexagonABIInfo : public ABIInfo {
3630
3631
3632 public:
3633   HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3634
3635 private:
3636
3637   ABIArgInfo classifyReturnType(QualType RetTy) const;
3638   ABIArgInfo classifyArgumentType(QualType Ty) const;
3639
3640   virtual void computeInfo(CGFunctionInfo &FI) const;
3641
3642   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3643                                  CodeGenFunction &CGF) const;
3644 };
3645
3646 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
3647 public:
3648   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
3649     : TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
3650
3651   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3652     return 29;
3653   }
3654 };
3655
3656 }
3657
3658 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
3659   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3660   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3661        it != ie; ++it)
3662     it->info = classifyArgumentType(it->type);
3663 }
3664
3665 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
3666   if (!isAggregateTypeForABI(Ty)) {
3667     // Treat an enum type as its underlying type.
3668     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3669       Ty = EnumTy->getDecl()->getIntegerType();
3670
3671     return (Ty->isPromotableIntegerType() ?
3672             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3673   }
3674
3675   // Ignore empty records.
3676   if (isEmptyRecord(getContext(), Ty, true))
3677     return ABIArgInfo::getIgnore();
3678
3679   // Structures with either a non-trivial destructor or a non-trivial
3680   // copy constructor are always indirect.
3681   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
3682     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3683
3684   uint64_t Size = getContext().getTypeSize(Ty);
3685   if (Size > 64)
3686     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
3687   // Pass in the smallest viable integer type.
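  // (e.g. a 6-byte struct (Size == 48) is coerced to i64 and a 3-byte one
  // to i32 by the ladder below.)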
3688   else if (Size > 32)
3689     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
3690   else if (Size > 16)
3691     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3692   else if (Size > 8)
3693     return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3694   else
3695     return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3696 }
3697
3698 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
3699   if (RetTy->isVoidType())
3700     return ABIArgInfo::getIgnore();
3701
3702   // Large vector types should be returned via memory.
3703   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
3704     return ABIArgInfo::getIndirect(0);
3705
3706   if (!isAggregateTypeForABI(RetTy)) {
3707     // Treat an enum type as its underlying type.
3708     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3709       RetTy = EnumTy->getDecl()->getIntegerType();
3710
3711     return (RetTy->isPromotableIntegerType() ?
3712             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3713   }
3714
3715   // Structures with either a non-trivial destructor or a non-trivial
3716   // copy constructor are always indirect.
3717   if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
3718     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3719
3720   if (isEmptyRecord(getContext(), RetTy, true))
3721     return ABIArgInfo::getIgnore();
3722
3723   // Aggregates <= 8 bytes are returned in registers (r0, or the r1:0 pair
3724   // for 64-bit results); other aggregates are returned indirectly.
3725   uint64_t Size = getContext().getTypeSize(RetTy);
3726   if (Size <= 64) {
3727     // Return in the smallest viable integer type.
3728     if (Size <= 8)
3729       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3730     if (Size <= 16)
3731       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3732     if (Size <= 32)
3733       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3734     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
3735   }
3736
3737   return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
3738 }
3739
3740 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3741                                        CodeGenFunction &CGF) const {
3742   // FIXME: Need to handle alignment.
3743   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3744
3745   CGBuilderTy &Builder = CGF.Builder;
3746   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
3747                                                        "ap");
3748   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3749   llvm::Type *PTy =
3750     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3751   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3752
3753   uint64_t Offset =
3754     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
3755   llvm::Value *NextAddr =
3756     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3757                       "ap.next");
3758   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3759
3760   return AddrTyped;
3761 }
3762
3763
3764 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
3765   if (TheTargetCodeGenInfo)
3766     return *TheTargetCodeGenInfo;
3767
3768   const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
3769   switch (Triple.getArch()) {
3770   default:
3771     return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
3772
3773   case llvm::Triple::mips:
3774   case llvm::Triple::mipsel:
3775     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
3776
3777   case llvm::Triple::mips64:
3778   case llvm::Triple::mips64el:
3779     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
3780
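  // For ARM, the ABI kind is picked from the target ABI string and float
  // ABI: "apcs-gnu" selects APCS, -mfloat-abi=hard selects AAPCS_VFP, and
  // everything else defaults to AAPCS.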
3781   case llvm::Triple::arm:
3782   case llvm::Triple::thumb:
3783     {
3784       ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
3785
3786       if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
3787         Kind = ARMABIInfo::APCS;
3788       else if (CodeGenOpts.FloatABI == "hard")
3789         Kind = ARMABIInfo::AAPCS_VFP;
3790
3791       return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
3792     }
3793
3794   case llvm::Triple::ppc:
3795     return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
3796   case llvm::Triple::ppc64:
3797     return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
3798
3799   case llvm::Triple::nvptx:
3800   case llvm::Triple::nvptx64:
3801     return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
3802
3803   case llvm::Triple::mblaze:
3804     return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
3805
3806   case llvm::Triple::msp430:
3807     return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
3808
3809   case llvm::Triple::tce:
3810     return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
3811
3812   case llvm::Triple::x86: {
3813     bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0;
3814
3815     if (Triple.isOSDarwin())
3816       return *(TheTargetCodeGenInfo =
3817                new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false,
3818                                            CodeGenOpts.NumRegisterParameters));
3819
3820     switch (Triple.getOS()) {
3821     case llvm::Triple::Cygwin:
3822     case llvm::Triple::MinGW32:
3823     case llvm::Triple::AuroraUX:
3824     case llvm::Triple::DragonFly:
3825     case llvm::Triple::FreeBSD:
3826     case llvm::Triple::OpenBSD:
3827     case llvm::Triple::Bitrig:
3828       return *(TheTargetCodeGenInfo =
3829                new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX,
3830                                            false,
3831                                            CodeGenOpts.NumRegisterParameters));
3832
3833     case llvm::Triple::Win32:
3834       return *(TheTargetCodeGenInfo =
3835                new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true,
3836                                            CodeGenOpts.NumRegisterParameters));
3837
3838     default:
3839       return *(TheTargetCodeGenInfo =
3840                new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX,
3841                                            false,
3842                                            CodeGenOpts.NumRegisterParameters));
3843     }
3844   }
3845
3846   case llvm::Triple::x86_64: {
3847     bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;
3848
3849     switch (Triple.getOS()) {
3850     case llvm::Triple::Win32:
3851     case llvm::Triple::MinGW32:
3852     case llvm::Triple::Cygwin:
3853       return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
3854     default:
3855       return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
3856                                                                   HasAVX));
3857     }
3858   }
3859   case llvm::Triple::hexagon:
3860     return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
3861   }
3862 }