TargetInfo.cpp revision 3ed7903d27f0e7e0cd3a61c165d39eca70f3cff5
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::TargetData &ABIInfo::getTargetData() const {
  return CGT.getTargetData();
}


void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(
    const CodeGen::CGFunctionInfo &) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}
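
// For instance, under the rules above, a C struct such as
//   struct S { int : 0; struct {} inner[4]; };
// contains only an unnamed bit-field and an array of empty records, so
// isEmptyRecord() returns true for it when AllowArrays is set. (Illustrative
// only; empty structs in C are a GNU extension.)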

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}
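
// For example, a struct like
//   struct S { struct { double d[1]; } inner; };
// is a single-element struct whose element type is 'double': the one-element
// array is collapsed to its element, the nested struct is recursed into, and
// there is no padding beyond the double.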

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
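
// For instance, 'struct { int a; float b; }' expands cleanly into two 32-bit
// arguments with the same stack layout, while 'struct { short a; int b; }'
// does not: the short is not a 32/64-bit basic type (and the struct has a
// padding hole), so it stays indirect.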

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy constructors should not be
    // passed by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}
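
// An illustration of the predicate above: <2 x i32>, <4 x i16>, and <8 x i8>
// are all 64-bit integer vectors and map to x86_mmx, while <1 x i64> is
// excluded by the final scalar-size check and <2 x float> fails the
// integer-element check.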

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsMMXDisabled;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

public:

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsMMXDisabled(m) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in a register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. We apply a similar transformation for
      // pointer types to improve the quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if (SeltTy->isRealFloatingType() || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
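
// For example, with the small-struct-in-registers ABI, 'struct { float f; }'
// is a single-element float struct and comes back in a floating-point
// register, while 'struct { int a, b; }' is register sized (64 bits) and is
// returned directly as an i64.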

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
  if (!ByVal)
    return ABIArgInfo::getIndirect(0, false);

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}
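
// Example of the interaction above on Darwin: a struct containing an __m128
// field has TypeAlign 16, getTypeStackAlignInBytes returns 16, and the byval
// gets align 16. A plain 'struct { double d; }' has TypeAlign 8 (above the
// minimum), but without an SSE vector member its stack alignment is knocked
// back down to 4, which is less than the type alignment, so Realign is set.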

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, /*ByVal=*/false);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by
    // passing them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    llvm::Type *IRType = CGT.ConvertType(Ty);
    if (UseX86_MMXType(IRType)) {
      if (IsMMXDisabled)
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            64));
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute whether the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
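
// Roughly, in C terms, the emitted va_arg sequence is (an illustrative
// sketch, not part of the generated code):
//   char *addr = *ap;                                        // "ap.cur"
//   if (align > 4)
//     addr = (char *)(((uintptr_t)addr + align - 1) & -align);
//   *ap = addr + ((sizeof(T) + align - 1) / align) * align;  // "ap.next"
//   return (T *)addr;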

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a
  /// suitable result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CodeGen::CGFunctionInfo &FI) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved.
    if (FI.getCallingConvention() == llvm::CallingConv::C) {
      bool HasAVXType = false;
      for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                              ie = FI.arg_end();
           it != ie; ++it) {
        if (it->info.isDirect()) {
          llvm::Type *Ty = it->info.getCoerceToType();
          if (llvm::VectorType *VTy = dyn_cast_or_null<llvm::VectorType>(Ty)) {
            if (VTy->getBitWidth() > 128) {
              HasAVXType = true;
              break;
            }
          }
        }
      }
      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(FI);
  }

};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //     the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //     class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  //     INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //     MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
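
// As a concrete example of the merge rules: for 'struct { int i; float f; }'
// both fields live in the same eightbyte, so merge(Integer, SSE) applies
// rule (d) and the whole struct is classified INTEGER (one GPR), whereas
// 'struct { float a; float b; }' merges SSE with SSE and stays in a single
// XMM register.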

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType())
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || Size == 256) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to
      // class SSEUP. The original Lo and Hi design considers that types
      // can't be greater than 128-bits, so a 64-bit split in Hi and Lo
      // makes sense. This design isn't correct for 256-bits, but since
      // there are no cases where the upper parts would need to be
      // inspected, avoid adding complexity and just consider Hi to match
      // the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy)
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }
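
  // (For instance, under the rules above _Complex float occupies one
  // eightbyte and lands in a single SSE register, _Complex double becomes
  // (SSE, SSE) and takes two, and _Complex long double is ComplexX87 and
  // is passed in memory.)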

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't
    // extended to work for sizes wider than 128, early check and fallback
    // to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
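
  // (For example, a field of type 'double a[2]' occupies two eightbytes;
  // each element classifies as SSE, so after merging Lo = Hi = SSE and the
  // array can travel in two XMM registers.)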

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class
      // MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}
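
// Putting the pieces together: classify('struct { long l; double d; }', 0)
// puts the long in the first eightbyte and the double in the second, giving
// (Lo, Hi) = (Integer, SSE), so the struct is passed in one GPR plus one
// XMM register.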

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the
  // right place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the
  // right place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We specify the alignment of the byval in
  // all cases so that the mid-level optimizer knows the alignment of the
  // byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
  return ABIArgInfo::getIndirect(Align);
}

/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as
/// a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors,
  // strip them off if present.
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16-byte vector, prefer to pass it.
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
    llvm::Type *EltTy = VT->getElementType();
    unsigned BitWidth = VT->getBitWidth();
    if ((BitWidth >= 128 && BitWidth <= 256) &&
        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}
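
// For instance, 'struct { __m128 v; }' lowers to the LLVM struct
// {<4 x float>}; the wrapper is stripped and the value is passed directly
// as <4 x float> in an XMM register, exactly as a bare __m128 would be.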

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding. The user type specified is known to be at most 128
/// bits in size, and have passed through X86_64ABIInfo::classify with a
/// successful classification that put one of the two halves in the INTEGER
/// class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried
    // range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(i->getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.
    // Yes this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
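
// Example: for 'struct { double d; int i; }' (16 bytes, 4 bytes of tail
// padding), BitsContainNoUserData(S, 96, 128, Ctx) returns true; that is
// what later lets the second eightbyte be passed as an i32 rather than an
// i64.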

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to
/// return false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::TargetData &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}


/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float.
  // We pass as float if the last 4 bytes is just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}
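
// Concretely: 'struct { float a, b, c; }' classifies as (SSE, SSE); the low
// eightbyte holds two floats and becomes <2 x float>, while the high
// eightbyte holds one float followed by padding and becomes a lone float,
// so the struct travels as { <2 x float>, float }.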
1604/// 1605/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1606/// the source type. IROffset is an offset in bytes into the LLVM IR type that 1607/// the 8-byte value references. PrefType may be null. 1608/// 1609/// SourceTy is the source level type for the entire argument. SourceOffset is 1610/// an offset into this that we're processing (which is always either 0 or 8). 1611/// 1612llvm::Type *X86_64ABIInfo:: 1613GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1614 QualType SourceTy, unsigned SourceOffset) const { 1615 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1616 // returning an 8-byte unit starting with it. See if we can safely use it. 1617 if (IROffset == 0) { 1618 // Pointers and int64's always fill the 8-byte unit. 1619 if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64)) 1620 return IRType; 1621 1622 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1623 // goodness in the source type is just tail padding. This is allowed to 1624 // kick in for struct {double,int} on the int, but not on 1625 // struct{double,int,int} because we wouldn't return the second int. We 1626 // have to do this analysis on the source type because we can't depend on 1627 // unions being lowered a specific way etc. 1628 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1629 IRType->isIntegerTy(32)) { 1630 unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth(); 1631 1632 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1633 SourceOffset*8+64, getContext())) 1634 return IRType; 1635 } 1636 } 1637 1638 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1639 // If this is a struct, recurse into the field at the specified offset. 1640 const llvm::StructLayout *SL = getTargetData().getStructLayout(STy); 1641 if (IROffset < SL->getSizeInBytes()) { 1642 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1643 IROffset -= SL->getElementOffset(FieldIdx); 1644 1645 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1646 SourceTy, SourceOffset); 1647 } 1648 } 1649 1650 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1651 llvm::Type *EltTy = ATy->getElementType(); 1652 unsigned EltSize = getTargetData().getTypeAllocSize(EltTy); 1653 unsigned EltOffset = IROffset/EltSize*EltSize; 1654 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1655 SourceOffset); 1656 } 1657 1658 // Okay, we don't have any better idea of what to pass, so we pass this in an 1659 // integer register that isn't too big to fit the rest of the struct. 1660 unsigned TySizeInBytes = 1661 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1662 1663 assert(TySizeInBytes != SourceOffset && "Empty field?"); 1664 1665 // It is always safe to classify this as an integer type up to i64 that 1666 // isn't larger than the structure. 1667 return llvm::IntegerType::get(getVMContext(), 1668 std::min(TySizeInBytes-SourceOffset, 8U)*8); 1669} 1670 1671 1672/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 1673/// be used as elements of a two register pair to pass or return, return a 1674/// first class aggregate to represent them. For example, if the low part of 1675/// a by-value argument should be passed as i32* and the high part as float, 1676/// return {i32*, float}. 
1677 static llvm::Type *
1678 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
1679                            const llvm::TargetData &TD) {
1680   // In order to correctly satisfy the ABI, we need the high part to start
1681   // at offset 8. If the high and low parts we inferred are both 4-byte types
1682   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1683   // the second element at offset 8. Check for this:
1684   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1685   unsigned HiAlign = TD.getABITypeAlignment(Hi);
1686   unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
1687   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1688 
1689   // To handle this, we have to increase the size of the low part so that the
1690   // second element will start at an 8 byte offset. We can't increase the size
1691   // of the second element because it might make us access off the end of the
1692   // struct.
1693   if (HiStart != 8) {
1694     // There are only two sorts of types the ABI generation code can produce for
1695     // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
1696     // Promote these to a larger type.
1697     if (Lo->isFloatTy())
1698       Lo = llvm::Type::getDoubleTy(Lo->getContext());
1699     else {
1700       assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
1701       Lo = llvm::Type::getInt64Ty(Lo->getContext());
1702     }
1703   }
1704 
1705   llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
1706 
1707 
1708   // Verify that the second element is at an 8-byte offset.
1709   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
1710          "Invalid x86-64 argument pair!");
1711   return Result;
1712 }
1713 
1714 ABIArgInfo X86_64ABIInfo::
1715 classifyReturnType(QualType RetTy) const {
1716   // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
1717   // classification algorithm.
1718   X86_64ABIInfo::Class Lo, Hi;
1719   classify(RetTy, 0, Lo, Hi);
1720 
1721   // Check some invariants.
1722   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1723   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1724 
1725   llvm::Type *ResType = 0;
1726   switch (Lo) {
1727   case NoClass:
1728     if (Hi == NoClass)
1729       return ABIArgInfo::getIgnore();
1730     // If the low part is just padding, it takes no register, leave ResType
1731     // null.
1732     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
1733            "Unknown missing lo part");
1734     break;
1735 
1736   case SSEUp:
1737   case X87Up:
1738     llvm_unreachable("Invalid classification for lo word.");
1739 
1740     // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
1741     // hidden argument.
1742   case Memory:
1743     return getIndirectReturnResult(RetTy);
1744 
1745     // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
1746     // available register of the sequence %rax, %rdx is used.
1747   case Integer:
1748     ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
1749 
1750     // If we have a sign or zero extended integer, make sure to return Extend
1751     // so that the parameter gets the right LLVM IR attributes.
1752     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
1753       // Treat an enum type as its underlying type.
1754       if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1755         RetTy = EnumTy->getDecl()->getIntegerType();
1756 
1757       if (RetTy->isIntegralOrEnumerationType() &&
1758           RetTy->isPromotableIntegerType())
1759         return ABIArgInfo::getExtend();
1760     }
1761     break;
1762 
1763     // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
1764     // available SSE register of the sequence %xmm0, %xmm1 is used.
1765   case SSE:
1766     ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
1767     break;
1768 
1769     // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
1770     // returned on the X87 stack in %st0 as 80-bit x87 number.
1771   case X87:
1772     ResType = llvm::Type::getX86_FP80Ty(getVMContext());
1773     break;
1774 
1775     // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
1776     // part of the value is returned in %st0 and the imaginary part in
1777     // %st1.
1778   case ComplexX87:
1779     assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
1780     ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
1781                                     llvm::Type::getX86_FP80Ty(getVMContext()),
1782                                     NULL);
1783     break;
1784   }
1785 
1786   llvm::Type *HighPart = 0;
1787   switch (Hi) {
1788     // Memory was handled previously and X87 should
1789     // never occur as a hi class.
1790   case Memory:
1791   case X87:
1792     llvm_unreachable("Invalid classification for hi word.");
1793 
1794   case ComplexX87: // Previously handled.
1795   case NoClass:
1796     break;
1797 
1798   case Integer:
1799     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
1800     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1801       return ABIArgInfo::getDirect(HighPart, 8);
1802     break;
1803   case SSE:
1804     HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
1805     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1806       return ABIArgInfo::getDirect(HighPart, 8);
1807     break;
1808 
1809     // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
1810     // is passed in the next available eightbyte chunk of the last used
1811     // vector register.
1812     //
1813     // SSEUP should always be preceded by SSE, just widen.
1814   case SSEUp:
1815     assert(Lo == SSE && "Unexpected SSEUp classification.");
1816     ResType = GetByteVectorType(RetTy);
1817     break;
1818 
1819     // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
1820     // returned together with the previous X87 value in %st0.
1821   case X87Up:
1822     // If X87Up is preceded by X87, we don't need to do
1823     // anything. However, in some cases with unions it may not be
1824     // preceded by X87. In such situations we follow gcc and pass the
1825     // extra bits in an SSE reg.
1826     if (Lo != X87) {
1827       HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
1828       if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1829         return ABIArgInfo::getDirect(HighPart, 8);
1830     }
1831     break;
1832   }
1833 
1834   // If a high part was specified, merge it together with the low part. It is
1835   // known to pass in the high eightbyte of the result. We do this by forming a
1836   // first class struct aggregate with the high and low part: {low, high}
1837   if (HighPart)
1838     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
1839 
1840   return ABIArgInfo::getDirect(ResType);
1841 }
1842 
1843 ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
1844                                                unsigned &neededSSE) const {
1845   X86_64ABIInfo::Class Lo, Hi;
1846   classify(Ty, 0, Lo, Hi);
1847 
1848   // Check some invariants.
1849   // FIXME: Enforce these by construction.
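  // For example, for struct {double d; int i;} the classifier produces
  // Lo = SSE (the double occupies the first eightbyte) and Hi = Integer
  // (the int lands in the second), while a lone int is Lo = Integer,
  // Hi = NoClass.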
1850 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1851 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1852 1853 neededInt = 0; 1854 neededSSE = 0; 1855 llvm::Type *ResType = 0; 1856 switch (Lo) { 1857 case NoClass: 1858 if (Hi == NoClass) 1859 return ABIArgInfo::getIgnore(); 1860 // If the low part is just padding, it takes no register, leave ResType 1861 // null. 1862 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1863 "Unknown missing lo part"); 1864 break; 1865 1866 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 1867 // on the stack. 1868 case Memory: 1869 1870 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 1871 // COMPLEX_X87, it is passed in memory. 1872 case X87: 1873 case ComplexX87: 1874 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1875 ++neededInt; 1876 return getIndirectResult(Ty); 1877 1878 case SSEUp: 1879 case X87Up: 1880 llvm_unreachable("Invalid classification for lo word."); 1881 1882 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 1883 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 1884 // and %r9 is used. 1885 case Integer: 1886 ++neededInt; 1887 1888 // Pick an 8-byte type based on the preferred type. 1889 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 1890 1891 // If we have a sign or zero extended integer, make sure to return Extend 1892 // so that the parameter gets the right LLVM IR attributes. 1893 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1894 // Treat an enum type as its underlying type. 1895 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1896 Ty = EnumTy->getDecl()->getIntegerType(); 1897 1898 if (Ty->isIntegralOrEnumerationType() && 1899 Ty->isPromotableIntegerType()) 1900 return ABIArgInfo::getExtend(); 1901 } 1902 1903 break; 1904 1905 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 1906 // available SSE register is used, the registers are taken in the 1907 // order from %xmm0 to %xmm7. 1908 case SSE: { 1909 llvm::Type *IRType = CGT.ConvertType(Ty); 1910 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 1911 ++neededSSE; 1912 break; 1913 } 1914 } 1915 1916 llvm::Type *HighPart = 0; 1917 switch (Hi) { 1918 // Memory was handled previously, ComplexX87 and X87 should 1919 // never occur as hi classes, and X87Up must be preceded by X87, 1920 // which is passed in memory. 1921 case Memory: 1922 case X87: 1923 case ComplexX87: 1924 llvm_unreachable("Invalid classification for hi word."); 1925 1926 case NoClass: break; 1927 1928 case Integer: 1929 ++neededInt; 1930 // Pick an 8-byte type based on the preferred type. 1931 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 1932 1933 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 1934 return ABIArgInfo::getDirect(HighPart, 8); 1935 break; 1936 1937 // X87Up generally doesn't occur here (long double is passed in 1938 // memory), except in situations involving unions. 1939 case X87Up: 1940 case SSE: 1941 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 1942 1943 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 1944 return ABIArgInfo::getDirect(HighPart, 8); 1945 1946 ++neededSSE; 1947 break; 1948 1949 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 1950 // eightbyte is passed in the upper half of the last used SSE 1951 // register. This only happens when 128-bit vectors are passed. 
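  // For example, a 128-bit vector such as <4 x float> classifies as
  // Lo = SSE, Hi = SSEUp and is passed in a single XMM register.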
1952 case SSEUp: 1953 assert(Lo == SSE && "Unexpected SSEUp classification"); 1954 ResType = GetByteVectorType(Ty); 1955 break; 1956 } 1957 1958 // If a high part was specified, merge it together with the low part. It is 1959 // known to pass in the high eightbyte of the result. We do this by forming a 1960 // first class struct aggregate with the high and low part: {low, high} 1961 if (HighPart) 1962 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 1963 1964 return ABIArgInfo::getDirect(ResType); 1965} 1966 1967void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1968 1969 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 1970 1971 // Keep track of the number of assigned registers. 1972 unsigned freeIntRegs = 6, freeSSERegs = 8; 1973 1974 // If the return value is indirect, then the hidden argument is consuming one 1975 // integer register. 1976 if (FI.getReturnInfo().isIndirect()) 1977 --freeIntRegs; 1978 1979 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 1980 // get assigned (in left-to-right order) for passing as follows... 1981 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1982 it != ie; ++it) { 1983 unsigned neededInt, neededSSE; 1984 it->info = classifyArgumentType(it->type, neededInt, neededSSE); 1985 1986 // AMD64-ABI 3.2.3p3: If there are no registers available for any 1987 // eightbyte of an argument, the whole argument is passed on the 1988 // stack. If registers have already been assigned for some 1989 // eightbytes of such an argument, the assignments get reverted. 1990 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 1991 freeIntRegs -= neededInt; 1992 freeSSERegs -= neededSSE; 1993 } else { 1994 it->info = getIndirectResult(it->type); 1995 } 1996 } 1997} 1998 1999static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2000 QualType Ty, 2001 CodeGenFunction &CGF) { 2002 llvm::Value *overflow_arg_area_p = 2003 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2004 llvm::Value *overflow_arg_area = 2005 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2006 2007 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2008 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2009 // It isn't stated explicitly in the standard, but in practice we use 2010 // alignment greater than 16 where necessary. 2011 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2012 if (Align > 8) { 2013 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2014 llvm::Value *Offset = 2015 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2016 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2017 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2018 CGF.Int64Ty); 2019 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2020 overflow_arg_area = 2021 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2022 overflow_arg_area->getType(), 2023 "overflow_arg_area.align"); 2024 } 2025 2026 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2027 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2028 llvm::Value *Res = 2029 CGF.Builder.CreateBitCast(overflow_arg_area, 2030 llvm::PointerType::getUnqual(LTy)); 2031 2032 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2033 // l->overflow_arg_area + sizeof(type). 2034 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2035 // an 8 byte boundary. 
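  // For example, va_arg of a 12-byte struct fetches it from overflow_arg_area
  // and then advances the pointer by 16: SizeInBytes is 12, which the
  // expression below rounds up to the next multiple of 8.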
2036 
2037   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2038   llvm::Value *Offset =
2039     llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
2040   overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2041                                             "overflow_arg_area.next");
2042   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2043 
2044   // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2045   return Res;
2046 }
2047 
2048 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2049                                       CodeGenFunction &CGF) const {
2050   llvm::LLVMContext &VMContext = CGF.getLLVMContext();
2051 
2052   // Assume that va_list type is correct; should be pointer to LLVM type:
2053   // struct {
2054   //   i32 gp_offset;
2055   //   i32 fp_offset;
2056   //   i8* overflow_arg_area;
2057   //   i8* reg_save_area;
2058   // };
2059   unsigned neededInt, neededSSE;
2060 
2061   Ty = CGF.getContext().getCanonicalType(Ty);
2062   ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
2063 
2064   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2065   // in the registers. If not go to step 7.
2066   if (!neededInt && !neededSSE)
2067     return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2068 
2069   // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2070   // general purpose registers needed to pass type and num_fp to hold
2071   // the number of floating point registers needed.
2072 
2073   // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2074   // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2075   // l->fp_offset > 304 - num_fp * 16 go to step 7.
2076   //
2077   // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
2078   // register save space.
2079 
2080   llvm::Value *InRegs = 0;
2081   llvm::Value *gp_offset_p = 0, *gp_offset = 0;
2082   llvm::Value *fp_offset_p = 0, *fp_offset = 0;
2083   if (neededInt) {
2084     gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2085     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2086     InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2087     InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2088   }
2089 
2090   if (neededSSE) {
2091     fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2092     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2093     llvm::Value *FitsInFP =
2094       llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2095     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2096     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2097   }
2098 
2099   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2100   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2101   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2102   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2103 
2104   // Emit code to load the value if it was passed in registers.
2105 
2106   CGF.EmitBlock(InRegBlock);
2107 
2108   // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2109   // an offset of l->gp_offset and/or l->fp_offset. This may require
2110   // copying to a temporary location in case the parameter is passed
2111   // in different register classes or requires an alignment greater
2112   // than 8 for general purpose registers and 16 for XMM registers.
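  // (For reference: in the register save area, the six GPRs occupy bytes
  // 0-47 and the eight XMM registers the 16-byte slots after them, so
  // gp_offset and fp_offset index disjoint parts of the same block.)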
2113 // 2114 // FIXME: This really results in shameful code when we end up needing to 2115 // collect arguments from different places; often what should result in a 2116 // simple assembling of a structure from scattered addresses has many more 2117 // loads than necessary. Can we clean this up? 2118 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2119 llvm::Value *RegAddr = 2120 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2121 "reg_save_area"); 2122 if (neededInt && neededSSE) { 2123 // FIXME: Cleanup. 2124 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2125 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2126 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2127 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2128 llvm::Type *TyLo = ST->getElementType(0); 2129 llvm::Type *TyHi = ST->getElementType(1); 2130 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2131 "Unexpected ABI info for mixed regs"); 2132 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2133 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2134 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2135 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2136 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2137 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2138 llvm::Value *V = 2139 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2140 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2141 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2142 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2143 2144 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2145 llvm::PointerType::getUnqual(LTy)); 2146 } else if (neededInt) { 2147 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2148 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2149 llvm::PointerType::getUnqual(LTy)); 2150 } else if (neededSSE == 1) { 2151 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2152 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2153 llvm::PointerType::getUnqual(LTy)); 2154 } else { 2155 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2156 // SSE registers are spaced 16 bytes apart in the register save 2157 // area, we need to collect the two eightbytes together. 2158 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2159 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2160 llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext); 2161 llvm::Type *DblPtrTy = 2162 llvm::PointerType::getUnqual(DoubleTy); 2163 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2164 DoubleTy, NULL); 2165 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2166 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2167 DblPtrTy)); 2168 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2169 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2170 DblPtrTy)); 2171 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2172 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2173 llvm::PointerType::getUnqual(LTy)); 2174 } 2175 2176 // AMD64-ABI 3.5.7p5: Step 5. Set: 2177 // l->gp_offset = l->gp_offset + num_gp * 8 2178 // l->fp_offset = l->fp_offset + num_fp * 16. 
2179 if (neededInt) { 2180 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2181 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2182 gp_offset_p); 2183 } 2184 if (neededSSE) { 2185 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2186 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2187 fp_offset_p); 2188 } 2189 CGF.EmitBranch(ContBlock); 2190 2191 // Emit code to load the value if it was passed in memory. 2192 2193 CGF.EmitBlock(InMemBlock); 2194 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2195 2196 // Return the appropriate result. 2197 2198 CGF.EmitBlock(ContBlock); 2199 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2200 "vaarg.addr"); 2201 ResAddr->addIncoming(RegAddr, InRegBlock); 2202 ResAddr->addIncoming(MemAddr, InMemBlock); 2203 return ResAddr; 2204} 2205 2206ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2207 2208 if (Ty->isVoidType()) 2209 return ABIArgInfo::getIgnore(); 2210 2211 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2212 Ty = EnumTy->getDecl()->getIntegerType(); 2213 2214 uint64_t Size = getContext().getTypeSize(Ty); 2215 2216 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2217 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2218 RT->getDecl()->hasFlexibleArrayMember()) 2219 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2220 2221 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2222 if (Size == 128 && 2223 getContext().getTargetInfo().getTriple().getOS() == llvm::Triple::MinGW32) 2224 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2225 Size)); 2226 2227 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2228 // not 1, 2, 4, or 8 bytes, must be passed by reference." 
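    // For example, a 3-byte struct is passed by reference, while a 4-byte
    // struct is passed directly as an i32.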
2229 if (Size <= 64 && 2230 (Size & (Size - 1)) == 0) 2231 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2232 Size)); 2233 2234 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2235 } 2236 2237 if (Ty->isPromotableIntegerType()) 2238 return ABIArgInfo::getExtend(); 2239 2240 return ABIArgInfo::getDirect(); 2241} 2242 2243void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2244 2245 QualType RetTy = FI.getReturnType(); 2246 FI.getReturnInfo() = classify(RetTy); 2247 2248 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2249 it != ie; ++it) 2250 it->info = classify(it->type); 2251} 2252 2253llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2254 CodeGenFunction &CGF) const { 2255 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 2256 llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 2257 2258 CGBuilderTy &Builder = CGF.Builder; 2259 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2260 "ap"); 2261 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2262 llvm::Type *PTy = 2263 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2264 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2265 2266 uint64_t Offset = 2267 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2268 llvm::Value *NextAddr = 2269 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2270 "ap.next"); 2271 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2272 2273 return AddrTyped; 2274} 2275 2276// PowerPC-32 2277 2278namespace { 2279class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2280public: 2281 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2282 2283 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2284 // This is recovered from gcc output. 2285 return 1; // r1 is the dedicated stack pointer 2286 } 2287 2288 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2289 llvm::Value *Address) const; 2290}; 2291 2292} 2293 2294bool 2295PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2296 llvm::Value *Address) const { 2297 // This is calculated from the LLVM and GCC tables and verified 2298 // against gcc output. AFAIK all ABIs use the same encoding. 
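  // (Each entry written below is the byte width of one DWARF register;
  // e.g. entries 0-31 are set to 4 because r0-r31 are 4 bytes wide.)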
2299 2300 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2301 llvm::LLVMContext &Context = CGF.getLLVMContext(); 2302 2303 llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 2304 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2305 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2306 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2307 2308 // 0-31: r0-31, the 4-byte general-purpose registers 2309 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2310 2311 // 32-63: fp0-31, the 8-byte floating-point registers 2312 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2313 2314 // 64-76 are various 4-byte special-purpose registers: 2315 // 64: mq 2316 // 65: lr 2317 // 66: ctr 2318 // 67: ap 2319 // 68-75 cr0-7 2320 // 76: xer 2321 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2322 2323 // 77-108: v0-31, the 16-byte vector registers 2324 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2325 2326 // 109: vrsave 2327 // 110: vscr 2328 // 111: spe_acc 2329 // 112: spefscr 2330 // 113: sfp 2331 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2332 2333 return false; 2334} 2335 2336 2337//===----------------------------------------------------------------------===// 2338// ARM ABI Implementation 2339//===----------------------------------------------------------------------===// 2340 2341namespace { 2342 2343class ARMABIInfo : public ABIInfo { 2344public: 2345 enum ABIKind { 2346 APCS = 0, 2347 AAPCS = 1, 2348 AAPCS_VFP 2349 }; 2350 2351private: 2352 ABIKind Kind; 2353 2354public: 2355 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2356 2357 bool isEABI() const { 2358 StringRef Env = getContext().getTargetInfo().getTriple().getEnvironmentName(); 2359 return (Env == "gnueabi" || Env == "eabi"); 2360 } 2361 2362private: 2363 ABIKind getABIKind() const { return Kind; } 2364 2365 ABIArgInfo classifyReturnType(QualType RetTy) const; 2366 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2367 2368 virtual void computeInfo(CGFunctionInfo &FI) const; 2369 2370 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2371 CodeGenFunction &CGF) const; 2372}; 2373 2374class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2375public: 2376 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2377 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2378 2379 const ARMABIInfo &getABIInfo() const { 2380 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2381 } 2382 2383 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2384 return 13; 2385 } 2386 2387 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2388 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2389 } 2390 2391 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2392 llvm::Value *Address) const { 2393 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2394 llvm::LLVMContext &Context = CGF.getLLVMContext(); 2395 2396 llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 2397 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2398 2399 // 0-15 are the 16 integer registers. 
2400     AssignToArrayRange(Builder, Address, Four8, 0, 15);
2401 
2402     return false;
2403   }
2404 
2405   unsigned getSizeOfUnwindException() const {
2406     if (getABIInfo().isEABI()) return 88;
2407     return TargetCodeGenInfo::getSizeOfUnwindException();
2408   }
2409 };
2410 
2411 }
2412 
2413 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
2414   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2415   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2416        it != ie; ++it)
2417     it->info = classifyArgumentType(it->type);
2418 
2419   // Always honor user-specified calling convention.
2420   if (FI.getCallingConvention() != llvm::CallingConv::C)
2421     return;
2422 
2423   // The calling convention an ABI uses by default.
2424   llvm::CallingConv::ID DefaultCC;
2425   if (isEABI())
2426     DefaultCC = llvm::CallingConv::ARM_AAPCS;
2427   else
2428     DefaultCC = llvm::CallingConv::ARM_APCS;
2429 
2430   // If the user did not explicitly ask for a specific calling convention (e.g.
2431   // via the pcs attribute), set the effective calling convention if it differs
2432   // from the ABI default.
2433   switch (getABIKind()) {
2434   case APCS:
2435     if (DefaultCC != llvm::CallingConv::ARM_APCS)
2436       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
2437     break;
2438   case AAPCS:
2439     if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
2440       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
2441     break;
2442   case AAPCS_VFP:
2443     if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
2444       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
2445     break;
2446   }
2447 }
2448 
2449 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
2450 /// aggregate. If HAMembers is non-null, the number of base elements
2451 /// contained in the type is returned through it; this is used for the
2452 /// recursive calls that check aggregate component types.
2453 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
2454                                    ASTContext &Context,
2455                                    uint64_t *HAMembers = 0) {
2456   uint64_t Members;
2457   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2458     if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
2459       return false;
2460     Members *= AT->getSize().getZExtValue();
2461   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
2462     const RecordDecl *RD = RT->getDecl();
2463     if (RD->isUnion() || RD->hasFlexibleArrayMember())
2464       return false;
2465     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2466       if (!CXXRD->isAggregate())
2467         return false;
2468     }
2469     Members = 0;
2470     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2471          i != e; ++i) {
2472       const FieldDecl *FD = *i;
2473       uint64_t FldMembers;
2474       if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
2475         return false;
2476       Members += FldMembers;
2477     }
2478   } else {
2479     Members = 1;
2480     if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2481       Members = 2;
2482       Ty = CT->getElementType();
2483     }
2484 
2485     // Homogeneous aggregates for AAPCS-VFP must have base types of float,
2486     // double, or 64-bit or 128-bit vectors.
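    // For example, struct { float x, y, z, w; } is a homogeneous aggregate
    // of four floats, while struct { float f; double d; } is not, because
    // its members do not share a base type.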
2487 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2488 if (BT->getKind() != BuiltinType::Float && 2489 BT->getKind() != BuiltinType::Double) 2490 return false; 2491 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2492 unsigned VecSize = Context.getTypeSize(VT); 2493 if (VecSize != 64 && VecSize != 128) 2494 return false; 2495 } else { 2496 return false; 2497 } 2498 2499 // The base type must be the same for all members. Vector types of the 2500 // same total size are treated as being equivalent here. 2501 const Type *TyPtr = Ty.getTypePtr(); 2502 if (!Base) 2503 Base = TyPtr; 2504 if (Base != TyPtr && 2505 (!Base->isVectorType() || !TyPtr->isVectorType() || 2506 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 2507 return false; 2508 } 2509 2510 // Homogeneous Aggregates can have at most 4 members of the base type. 2511 if (HAMembers) 2512 *HAMembers = Members; 2513 return (Members <= 4); 2514} 2515 2516ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2517 if (!isAggregateTypeForABI(Ty)) { 2518 // Treat an enum type as its underlying type. 2519 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2520 Ty = EnumTy->getDecl()->getIntegerType(); 2521 2522 return (Ty->isPromotableIntegerType() ? 2523 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2524 } 2525 2526 // Ignore empty records. 2527 if (isEmptyRecord(getContext(), Ty, true)) 2528 return ABIArgInfo::getIgnore(); 2529 2530 // Structures with either a non-trivial destructor or a non-trivial 2531 // copy constructor are always indirect. 2532 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2533 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2534 2535 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 2536 // Homogeneous Aggregates need to be expanded. 2537 const Type *Base = 0; 2538 if (isHomogeneousAggregate(Ty, Base, getContext())) 2539 return ABIArgInfo::getExpand(); 2540 } 2541 2542 // Otherwise, pass by coercing to a structure of the appropriate size. 2543 // 2544 // FIXME: This is kind of nasty... but there isn't much choice because the ARM 2545 // backend doesn't support byval. 2546 // FIXME: This doesn't handle alignment > 64 bits. 2547 llvm::Type* ElemTy; 2548 unsigned SizeRegs; 2549 if (getContext().getTypeAlign(Ty) > 32) { 2550 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 2551 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 2552 } else { 2553 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 2554 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 2555 } 2556 2557 llvm::Type *STy = 2558 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 2559 return ABIArgInfo::getDirect(STy); 2560} 2561 2562static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 2563 llvm::LLVMContext &VMContext) { 2564 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 2565 // is called integer-like if its size is less than or equal to one word, and 2566 // the offset of each of its addressable sub-fields is zero. 2567 2568 uint64_t Size = Context.getTypeSize(Ty); 2569 2570 // Check that the type fits in a word. 2571 if (Size > 32) 2572 return false; 2573 2574 // FIXME: Handle vector types! 2575 if (Ty->isVectorType()) 2576 return false; 2577 2578 // Float types are never treated as "integer like". 2579 if (Ty->isRealFloatingType()) 2580 return false; 2581 2582 // If this is a builtin or pointer type then it is ok. 
2583 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 2584 return true; 2585 2586 // Small complex integer types are "integer like". 2587 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 2588 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 2589 2590 // Single element and zero sized arrays should be allowed, by the definition 2591 // above, but they are not. 2592 2593 // Otherwise, it must be a record type. 2594 const RecordType *RT = Ty->getAs<RecordType>(); 2595 if (!RT) return false; 2596 2597 // Ignore records with flexible arrays. 2598 const RecordDecl *RD = RT->getDecl(); 2599 if (RD->hasFlexibleArrayMember()) 2600 return false; 2601 2602 // Check that all sub-fields are at offset 0, and are themselves "integer 2603 // like". 2604 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2605 2606 bool HadField = false; 2607 unsigned idx = 0; 2608 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2609 i != e; ++i, ++idx) { 2610 const FieldDecl *FD = *i; 2611 2612 // Bit-fields are not addressable, we only need to verify they are "integer 2613 // like". We still have to disallow a subsequent non-bitfield, for example: 2614 // struct { int : 0; int x } 2615 // is non-integer like according to gcc. 2616 if (FD->isBitField()) { 2617 if (!RD->isUnion()) 2618 HadField = true; 2619 2620 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2621 return false; 2622 2623 continue; 2624 } 2625 2626 // Check if this field is at offset 0. 2627 if (Layout.getFieldOffset(idx) != 0) 2628 return false; 2629 2630 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2631 return false; 2632 2633 // Only allow at most one field in a structure. This doesn't match the 2634 // wording above, but follows gcc in situations with a field following an 2635 // empty structure. 2636 if (!RD->isUnion()) { 2637 if (HadField) 2638 return false; 2639 2640 HadField = true; 2641 } 2642 } 2643 2644 return true; 2645} 2646 2647ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 2648 if (RetTy->isVoidType()) 2649 return ABIArgInfo::getIgnore(); 2650 2651 // Large vector types should be returned via memory. 2652 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 2653 return ABIArgInfo::getIndirect(0); 2654 2655 if (!isAggregateTypeForABI(RetTy)) { 2656 // Treat an enum type as its underlying type. 2657 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2658 RetTy = EnumTy->getDecl()->getIntegerType(); 2659 2660 return (RetTy->isPromotableIntegerType() ? 2661 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2662 } 2663 2664 // Structures with either a non-trivial destructor or a non-trivial 2665 // copy constructor are always indirect. 2666 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 2667 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2668 2669 // Are we following APCS? 2670 if (getABIKind() == APCS) { 2671 if (isEmptyRecord(getContext(), RetTy, false)) 2672 return ABIArgInfo::getIgnore(); 2673 2674 // Complex types are all returned as packed integers. 2675 // 2676 // FIXME: Consider using 2 x vector types if the back end handles them 2677 // correctly. 2678 if (RetTy->isAnyComplexType()) 2679 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2680 getContext().getTypeSize(RetTy))); 2681 2682 // Integer like structures are returned in r0. 2683 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 2684 // Return in the smallest viable integer type. 
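    // For example, a struct whose only field is a short is returned as i16.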
2685 uint64_t Size = getContext().getTypeSize(RetTy); 2686 if (Size <= 8) 2687 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2688 if (Size <= 16) 2689 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2690 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2691 } 2692 2693 // Otherwise return in memory. 2694 return ABIArgInfo::getIndirect(0); 2695 } 2696 2697 // Otherwise this is an AAPCS variant. 2698 2699 if (isEmptyRecord(getContext(), RetTy, true)) 2700 return ABIArgInfo::getIgnore(); 2701 2702 // Check for homogeneous aggregates with AAPCS-VFP. 2703 if (getABIKind() == AAPCS_VFP) { 2704 const Type *Base = 0; 2705 if (isHomogeneousAggregate(RetTy, Base, getContext())) 2706 // Homogeneous Aggregates are returned directly. 2707 return ABIArgInfo::getDirect(); 2708 } 2709 2710 // Aggregates <= 4 bytes are returned in r0; other aggregates 2711 // are returned indirectly. 2712 uint64_t Size = getContext().getTypeSize(RetTy); 2713 if (Size <= 32) { 2714 // Return in the smallest viable integer type. 2715 if (Size <= 8) 2716 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2717 if (Size <= 16) 2718 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2719 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2720 } 2721 2722 return ABIArgInfo::getIndirect(0); 2723} 2724 2725llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2726 CodeGenFunction &CGF) const { 2727 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 2728 llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 2729 2730 CGBuilderTy &Builder = CGF.Builder; 2731 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2732 "ap"); 2733 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2734 // Handle address alignment for type alignment > 32 bits 2735 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 2736 if (TyAlign > 4) { 2737 assert((TyAlign & (TyAlign - 1)) == 0 && 2738 "Alignment is not power of 2!"); 2739 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 2740 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 2741 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 2742 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2743 } 2744 llvm::Type *PTy = 2745 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2746 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2747 2748 uint64_t Offset = 2749 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 2750 llvm::Value *NextAddr = 2751 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2752 "ap.next"); 2753 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2754 2755 return AddrTyped; 2756} 2757 2758//===----------------------------------------------------------------------===// 2759// PTX ABI Implementation 2760//===----------------------------------------------------------------------===// 2761 2762namespace { 2763 2764class PTXABIInfo : public ABIInfo { 2765public: 2766 PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2767 2768 ABIArgInfo classifyReturnType(QualType RetTy) const; 2769 ABIArgInfo classifyArgumentType(QualType Ty) const; 2770 2771 virtual void computeInfo(CGFunctionInfo &FI) const; 2772 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2773 CodeGenFunction &CFG) const; 2774}; 2775 2776class PTXTargetCodeGenInfo : public TargetCodeGenInfo { 2777public: 2778 
PTXTargetCodeGenInfo(CodeGenTypes &CGT)
2779     : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
2780 
2781   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2782                                    CodeGen::CodeGenModule &M) const;
2783 };
2784 
2785 ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
2786   if (RetTy->isVoidType())
2787     return ABIArgInfo::getIgnore();
2788   if (isAggregateTypeForABI(RetTy))
2789     return ABIArgInfo::getIndirect(0);
2790   return ABIArgInfo::getDirect();
2791 }
2792 
2793 ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
2794   if (isAggregateTypeForABI(Ty))
2795     return ABIArgInfo::getIndirect(0);
2796 
2797   return ABIArgInfo::getDirect();
2798 }
2799 
2800 void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
2801   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2802   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2803        it != ie; ++it)
2804     it->info = classifyArgumentType(it->type);
2805 
2806   // Always honor user-specified calling convention.
2807   if (FI.getCallingConvention() != llvm::CallingConv::C)
2808     return;
2809 
2810   // The calling convention an ABI uses by default.
2811   llvm::CallingConv::ID DefaultCC;
2812   const LangOptions &LangOpts = getContext().getLangOptions();
2813   if (LangOpts.OpenCL || LangOpts.CUDA) {
2814     // If we are in OpenCL or CUDA mode, then default to device functions
2815     DefaultCC = llvm::CallingConv::PTX_Device;
2816   } else {
2817     // If we are in standard C/C++ mode, use the triple to decide on the default
2818     StringRef Env =
2819       getContext().getTargetInfo().getTriple().getEnvironmentName();
2820     if (Env == "device")
2821       DefaultCC = llvm::CallingConv::PTX_Device;
2822     else
2823       DefaultCC = llvm::CallingConv::PTX_Kernel;
2824   }
2825   FI.setEffectiveCallingConvention(DefaultCC);
2826 
2827 }
2828 
2829 llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2830                                    CodeGenFunction &CFG) const {
2831   llvm_unreachable("PTX does not support varargs");
2832   return 0;
2833 }
2834 
2835 void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
2836                                                llvm::GlobalValue *GV,
2837                                                CodeGen::CodeGenModule &M) const{
2838   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
2839   if (!FD) return;
2840 
2841   llvm::Function *F = cast<llvm::Function>(GV);
2842 
2843   // Perform special handling in OpenCL mode
2844   if (M.getLangOptions().OpenCL) {
2845     // Use OpenCL function attributes to set proper calling conventions
2846     // By default, all functions are device functions
2847     if (FD->hasAttr<OpenCLKernelAttr>()) {
2848       // OpenCL __kernel functions get a kernel calling convention
2849       F->setCallingConv(llvm::CallingConv::PTX_Kernel);
2850       // And kernel functions are not subject to inlining
2851       F->addFnAttr(llvm::Attribute::NoInline);
2852     }
2853   }
2854 
2855   // Perform special handling in CUDA mode.
2856   if (M.getLangOptions().CUDA) {
2857     // CUDA __global__ functions get a kernel calling convention. Since
2858     // __global__ functions cannot be called from the device, we do not
2859     // need to set the noinline attribute.
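    // For example, a function declared __global__ void kernel(int *p) is
    // given the PTX_Kernel calling convention below.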
2860 if (FD->getAttr<CUDAGlobalAttr>()) 2861 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 2862 } 2863} 2864 2865} 2866 2867//===----------------------------------------------------------------------===// 2868// MBlaze ABI Implementation 2869//===----------------------------------------------------------------------===// 2870 2871namespace { 2872 2873class MBlazeABIInfo : public ABIInfo { 2874public: 2875 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2876 2877 bool isPromotableIntegerType(QualType Ty) const; 2878 2879 ABIArgInfo classifyReturnType(QualType RetTy) const; 2880 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2881 2882 virtual void computeInfo(CGFunctionInfo &FI) const { 2883 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2884 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2885 it != ie; ++it) 2886 it->info = classifyArgumentType(it->type); 2887 } 2888 2889 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2890 CodeGenFunction &CGF) const; 2891}; 2892 2893class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 2894public: 2895 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 2896 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 2897 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2898 CodeGen::CodeGenModule &M) const; 2899}; 2900 2901} 2902 2903bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 2904 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 2905 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 2906 switch (BT->getKind()) { 2907 case BuiltinType::Bool: 2908 case BuiltinType::Char_S: 2909 case BuiltinType::Char_U: 2910 case BuiltinType::SChar: 2911 case BuiltinType::UChar: 2912 case BuiltinType::Short: 2913 case BuiltinType::UShort: 2914 return true; 2915 default: 2916 return false; 2917 } 2918 return false; 2919} 2920 2921llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2922 CodeGenFunction &CGF) const { 2923 // FIXME: Implement 2924 return 0; 2925} 2926 2927 2928ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 2929 if (RetTy->isVoidType()) 2930 return ABIArgInfo::getIgnore(); 2931 if (isAggregateTypeForABI(RetTy)) 2932 return ABIArgInfo::getIndirect(0); 2933 2934 return (isPromotableIntegerType(RetTy) ? 2935 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2936} 2937 2938ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 2939 if (isAggregateTypeForABI(Ty)) 2940 return ABIArgInfo::getIndirect(0); 2941 2942 return (isPromotableIntegerType(Ty) ? 2943 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2944} 2945 2946void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 2947 llvm::GlobalValue *GV, 2948 CodeGen::CodeGenModule &M) 2949 const { 2950 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 2951 if (!FD) return; 2952 2953 llvm::CallingConv::ID CC = llvm::CallingConv::C; 2954 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 2955 CC = llvm::CallingConv::MBLAZE_INTR; 2956 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 2957 CC = llvm::CallingConv::MBLAZE_SVOL; 2958 2959 if (CC != llvm::CallingConv::C) { 2960 // Handle 'interrupt_handler' attribute: 2961 llvm::Function *F = cast<llvm::Function>(GV); 2962 2963 // Step 1: Set ISR calling convention. 2964 F->setCallingConv(CC); 2965 2966 // Step 2: Add attributes goodness. 2967 F->addFnAttr(llvm::Attribute::NoInline); 2968 } 2969 2970 // Step 3: Emit _interrupt_handler alias. 
2971 if (CC == llvm::CallingConv::MBLAZE_INTR) 2972 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 2973 "_interrupt_handler", GV, &M.getModule()); 2974} 2975 2976 2977//===----------------------------------------------------------------------===// 2978// MSP430 ABI Implementation 2979//===----------------------------------------------------------------------===// 2980 2981namespace { 2982 2983class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 2984public: 2985 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 2986 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 2987 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2988 CodeGen::CodeGenModule &M) const; 2989}; 2990 2991} 2992 2993void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 2994 llvm::GlobalValue *GV, 2995 CodeGen::CodeGenModule &M) const { 2996 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 2997 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 2998 // Handle 'interrupt' attribute: 2999 llvm::Function *F = cast<llvm::Function>(GV); 3000 3001 // Step 1: Set ISR calling convention. 3002 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3003 3004 // Step 2: Add attributes goodness. 3005 F->addFnAttr(llvm::Attribute::NoInline); 3006 3007 // Step 3: Emit ISR vector alias. 3008 unsigned Num = attr->getNumber() + 0xffe0; 3009 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3010 "vector_" + Twine::utohexstr(Num), 3011 GV, &M.getModule()); 3012 } 3013 } 3014} 3015 3016//===----------------------------------------------------------------------===// 3017// MIPS ABI Implementation. This works for both little-endian and 3018// big-endian variants. 3019//===----------------------------------------------------------------------===// 3020 3021namespace { 3022class MipsABIInfo : public ABIInfo { 3023 bool IsO32; 3024 unsigned MinABIStackAlignInBytes; 3025 llvm::Type* HandleStructTy(QualType Ty) const; 3026public: 3027 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3028 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {} 3029 3030 ABIArgInfo classifyReturnType(QualType RetTy) const; 3031 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3032 virtual void computeInfo(CGFunctionInfo &FI) const; 3033 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3034 CodeGenFunction &CGF) const; 3035}; 3036 3037class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3038 unsigned SizeOfUnwindException; 3039public: 3040 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3041 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3042 SizeOfUnwindException(IsO32 ? 24 : 32) {} 3043 3044 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 3045 return 29; 3046 } 3047 3048 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3049 llvm::Value *Address) const; 3050 3051 unsigned getSizeOfUnwindException() const { 3052 return SizeOfUnwindException; 3053 } 3054}; 3055} 3056 3057// In N32/64, an aligned double precision floating point field is passed in 3058// a register. 
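// For example, under N32/64 a struct { int i; double d; } is lowered by
// HandleStructTy to { i64, double }: the int (plus padding) becomes an i64,
// and the 8-byte-aligned double stays a double so the backend can pass it
// in a floating-point register.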
3059llvm::Type* MipsABIInfo::HandleStructTy(QualType Ty) const { 3060 if (IsO32) 3061 return 0; 3062 3063 const RecordType *RT = Ty->getAsStructureType(); 3064 3065 if (!RT) 3066 return 0; 3067 3068 const RecordDecl *RD = RT->getDecl(); 3069 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3070 uint64_t StructSize = getContext().getTypeSize(Ty); 3071 assert(!(StructSize % 8) && "Size of structure must be multiple of 8."); 3072 3073 SmallVector<llvm::Type*, 8> ArgList; 3074 uint64_t LastOffset = 0; 3075 unsigned idx = 0; 3076 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 3077 3078 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3079 i != e; ++i, ++idx) { 3080 const QualType Ty = (*i)->getType(); 3081 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3082 3083 if (!BT || BT->getKind() != BuiltinType::Double) 3084 continue; 3085 3086 uint64_t Offset = Layout.getFieldOffset(idx); 3087 if (Offset % 64) // Ignore doubles that are not aligned. 3088 continue; 3089 3090 // Add ((Offset - LastOffset) / 64) args of type i64. 3091 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 3092 ArgList.push_back(I64); 3093 3094 // Add double type. 3095 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3096 LastOffset = Offset + 64; 3097 } 3098 3099 // This structure doesn't have an aligned double field. 3100 if (!LastOffset) 3101 return 0; 3102 3103 // Add ((StructSize - LastOffset) / 64) args of type i64. 3104 for (unsigned N = (StructSize - LastOffset) / 64; N; --N) 3105 ArgList.push_back(I64); 3106 3107 // If the size of the remainder is not zero, add one more integer type to 3108 // ArgList. 3109 unsigned R = (StructSize - LastOffset) % 64; 3110 if (R) 3111 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 3112 3113 return llvm::StructType::get(getVMContext(), ArgList); 3114} 3115 3116ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty) const { 3117 if (isAggregateTypeForABI(Ty)) { 3118 // Ignore empty aggregates. 3119 if (getContext().getTypeSize(Ty) == 0) 3120 return ABIArgInfo::getIgnore(); 3121 3122 // Records with non trivial destructors/constructors should not be passed 3123 // by value. 3124 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3125 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3126 3127 llvm::Type *ResType; 3128 if ((ResType = HandleStructTy(Ty))) 3129 return ABIArgInfo::getDirect(ResType); 3130 3131 return ABIArgInfo::getIndirect(0); 3132 } 3133 3134 // Treat an enum type as its underlying type. 3135 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3136 Ty = EnumTy->getDecl()->getIntegerType(); 3137 3138 return (Ty->isPromotableIntegerType() ? 3139 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3140} 3141 3142ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3143 if (RetTy->isVoidType()) 3144 return ABIArgInfo::getIgnore(); 3145 3146 if (isAggregateTypeForABI(RetTy)) { 3147 if ((IsO32 && RetTy->isAnyComplexType()) || 3148 (!IsO32 && (getContext().getTypeSize(RetTy) <= 128))) 3149 return ABIArgInfo::getDirect(); 3150 3151 return ABIArgInfo::getIndirect(0); 3152 } 3153 3154 // Treat an enum type as its underlying type. 3155 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3156 RetTy = EnumTy->getDecl()->getIntegerType(); 3157 3158 return (RetTy->isPromotableIntegerType() ? 

ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Ignore empty aggregates.
    if (getContext().getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    llvm::Type *ResType;
    if ((ResType = HandleStructTy(Ty)))
      return ABIArgInfo::getDirect(ResType);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy)) {
    if ((IsO32 && RetTy->isAnyComplexType()) ||
        (!IsO32 && (getContext().getTypeSize(RetTy) <= 128)))
      return ABIArgInfo::getDirect();

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;

  if (TypeAlign > MinABIStackAlignInBytes) {
    // Round Addr up to the next multiple of TypeAlign:
    // (Addr + TypeAlign - 1) & -TypeAlign.
    llvm::Value *AddrAsInt32 = CGF.Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    llvm::Value *Inc = llvm::ConstantInt::get(CGF.Int32Ty, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt32, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max(TypeAlign, MinABIStackAlignInBytes);
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
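
// A quick sanity check of the rounding above (illustrative numbers): for a
// double under O32, TypeAlign == 8 > MinABIStackAlignInBytes == 4, so with
// ap.cur == 0x1004 the emitted code computes (0x1004 + 7) & -8 == 0x1008;
// the argument is read from 0x1008 and ap.next becomes 0x1010.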

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(Builder, Address, Four8, 80, 181);

  return false;
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                   CodeGen::CodeGenModule &M) const;
};

void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                               llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOptions().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);

      if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
          M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");

        SmallVector<llvm::Value*, 5> Operands;
        Operands.push_back(F);

        Operands.push_back(llvm::Constant::getIntegerValue(
                             llvm::Type::getInt32Ty(Context),
                             llvm::APInt(32,
                               FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(
                             llvm::Type::getInt32Ty(Context),
                             llvm::APInt(32,
                               FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(
                             llvm::Type::getInt32Ty(Context),
                             llvm::APInt(32,
                               FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(llvm::ConstantInt::getTrue(Context));

        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}
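
// A sketch of the metadata emitted above for a hypothetical kernel
// (illustrative; the exact printed form depends on the IR writer):
//
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1))) void k();
//
//   !opencl.kernel_wg_size_info = !{!0}
//   !0 = metadata !{void ()* @k, i32 8, i32 4, i32 1, i1 true}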

/// getTargetCodeGenInfo - Return the target-specific codegen info for the
/// configured target triple, constructing it on first use and caching it in
/// TheTargetCodeGenInfo thereafter.
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::arm:
  case llvm::Triple::thumb: {
    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;

    if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
      Kind = ARMABIInfo::APCS;
    else if (CodeGenOpts.FloatABI == "hard")
      Kind = ARMABIInfo::AAPCS_VFP;

    return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));

  case llvm::Triple::ptx32:
  case llvm::Triple::ptx64:
    return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));

  case llvm::Triple::mblaze:
    return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(),
                             "no-mmx") == 0;

    if (Triple.isOSDarwin())
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX));

    switch (Triple.getOS()) {
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
    case llvm::Triple::NetBSD:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX));
    }
  }

  case llvm::Triple::x86_64:
    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
    case llvm::Triple::MinGW32:
    case llvm::Triple::Cygwin:
      return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
    default:
      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
    }
  }
}