TargetInfo.cpp revision 44f0fd2804e9952a8dbf85bb60ee3501aa9f5ee7
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::TargetData &ABIInfo::getTargetData() const {
  return CGT.getTargetData();
}


void ABIArgInfo::dump() const {
  llvm::raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Coerce:
    OS << "Coerce Type=";
    getCoerceToType()->print(OS);
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
      FT = AT->getElementType();

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}
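
// Illustrative examples (hypothetical types, for exposition only; not part of
// the original revision): how the two predicates above classify a few simple
// records.
//
//   struct Empty      { };                     // isEmptyRecord: true
//   struct PadOnly    { int : 0; };            // only an unnamed bit-field: true
//   struct EmptyArray { struct Empty e[4]; };  // true when AllowArrays is true
//   struct HasData    { char c; };             // false: contains a named field
//   struct Flex       { int n; int tail[]; };  // false: flexible array member
//
// Because C++ record fields are conservatively treated as non-empty, a field
// whose type is a C++ class never makes the enclosing record empty.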

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}
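
// Illustrative examples (hypothetical types, for exposition only): these
// predicates drive the "pass by invisible reference" rules used by the
// per-target classifiers later in this file.
//
//   struct POD     { int x; double y; };                  // trivial: false
//   struct HasDtor { ~HasDtor(); int fd; };               // non-trivial dtor: true
//   struct HasCopy { HasCopy(const HasCopy &); int v; };  // non-trivial copy ctor: true
//
// Types for which they return true are classified as Indirect with
// ByVal=false, i.e. the caller passes the address of a temporary rather than
// a byval copy.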

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;
  }

  return true;
}
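
// Illustrative examples (hypothetical types, for exposition only): what
// canExpandIndirectArgument() currently accepts.
//
//   struct P2 { int x; int y; };       // expandable: two 32-bit fields
//   struct PD { double d; void *p; };  // expandable: 64-bit and pointer fields
//   struct BF { int x : 3; int y; };   // rejected: contains a bit-field
//   struct SC { short s; char c; };    // rejected: fields are not 32/64 bits
//
// Expanded arguments are emitted as separate scalar parameters instead of a
// byval pointer; the X86-32 code below only does this for records of at most
// 128 bits (see X86_32ABIInfo::classifyArgumentType).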

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;

public:

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
380 if (CGM.isTargetDarwin()) return 5; 381 382 return 4; 383 } 384 385 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 386 llvm::Value *Address) const; 387}; 388 389} 390 391/// shouldReturnTypeInRegister - Determine if the given type should be 392/// passed in a register (for the Darwin ABI). 393bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 394 ASTContext &Context) { 395 uint64_t Size = Context.getTypeSize(Ty); 396 397 // Type must be register sized. 398 if (!isRegisterSize(Size)) 399 return false; 400 401 if (Ty->isVectorType()) { 402 // 64- and 128- bit vectors inside structures are not returned in 403 // registers. 404 if (Size == 64 || Size == 128) 405 return false; 406 407 return true; 408 } 409 410 // If this is a builtin, pointer, enum, complex type, member pointer, or 411 // member function pointer it is ok. 412 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 413 Ty->isAnyComplexType() || Ty->isEnumeralType() || 414 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 415 return true; 416 417 // Arrays are treated like records. 418 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 419 return shouldReturnTypeInRegister(AT->getElementType(), Context); 420 421 // Otherwise, it must be a record type. 422 const RecordType *RT = Ty->getAs<RecordType>(); 423 if (!RT) return false; 424 425 // FIXME: Traverse bases here too. 426 427 // Structure types are passed in register if all fields would be 428 // passed in a register. 429 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(), 430 e = RT->getDecl()->field_end(); i != e; ++i) { 431 const FieldDecl *FD = *i; 432 433 // Empty fields are ignored. 434 if (isEmptyField(Context, FD, true)) 435 continue; 436 437 // Check fields recursively. 438 if (!shouldReturnTypeInRegister(FD->getType(), Context)) 439 return false; 440 } 441 442 return true; 443} 444 445ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const { 446 if (RetTy->isVoidType()) 447 return ABIArgInfo::getIgnore(); 448 449 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 450 // On Darwin, some vectors are returned in registers. 451 if (IsDarwinVectorABI) { 452 uint64_t Size = getContext().getTypeSize(RetTy); 453 454 // 128-bit vectors are a special case; they are returned in 455 // registers and we need to make sure to pick a type the LLVM 456 // backend will like. 457 if (Size == 128) 458 return ABIArgInfo::getCoerce(llvm::VectorType::get( 459 llvm::Type::getInt64Ty(getVMContext()), 2)); 460 461 // Always return in register if it fits in a general purpose 462 // register, or if it is 64 bits and has a single element. 463 if ((Size == 8 || Size == 16 || Size == 32) || 464 (Size == 64 && VT->getNumElements() == 1)) 465 return ABIArgInfo::getCoerce(llvm::IntegerType::get(getVMContext(), 466 Size)); 467 468 return ABIArgInfo::getIndirect(0); 469 } 470 471 return ABIArgInfo::getDirect(); 472 } 473 474 if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { 475 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 476 // Structures with either a non-trivial destructor or a non-trivial 477 // copy constructor are always indirect. 478 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 479 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 480 481 // Structures with flexible arrays are always indirect. 482 if (RT->getDecl()->hasFlexibleArrayMember()) 483 return ABIArgInfo::getIndirect(0); 484 } 485 486 // If specified, structs and unions are always indirect. 
487 if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType()) 488 return ABIArgInfo::getIndirect(0); 489 490 // Classify "single element" structs as their element type. 491 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) { 492 if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) { 493 if (BT->isIntegerType()) { 494 // We need to use the size of the structure, padding 495 // bit-fields can adjust that to be larger than the single 496 // element type. 497 uint64_t Size = getContext().getTypeSize(RetTy); 498 return ABIArgInfo::getCoerce( 499 llvm::IntegerType::get(getVMContext(), (unsigned)Size)); 500 } 501 502 if (BT->getKind() == BuiltinType::Float) { 503 assert(getContext().getTypeSize(RetTy) == 504 getContext().getTypeSize(SeltTy) && 505 "Unexpect single element structure size!"); 506 return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(getVMContext())); 507 } 508 509 if (BT->getKind() == BuiltinType::Double) { 510 assert(getContext().getTypeSize(RetTy) == 511 getContext().getTypeSize(SeltTy) && 512 "Unexpect single element structure size!"); 513 return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(getVMContext())); 514 } 515 } else if (SeltTy->isPointerType()) { 516 // FIXME: It would be really nice if this could come out as the proper 517 // pointer type. 518 const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext()); 519 return ABIArgInfo::getCoerce(PtrTy); 520 } else if (SeltTy->isVectorType()) { 521 // 64- and 128-bit vectors are never returned in a 522 // register when inside a structure. 523 uint64_t Size = getContext().getTypeSize(RetTy); 524 if (Size == 64 || Size == 128) 525 return ABIArgInfo::getIndirect(0); 526 527 return classifyReturnType(QualType(SeltTy, 0)); 528 } 529 } 530 531 // Small structures which are register sized are generally returned 532 // in a register. 533 if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) { 534 uint64_t Size = getContext().getTypeSize(RetTy); 535 return ABIArgInfo::getCoerce(llvm::IntegerType::get(getVMContext(),Size)); 536 } 537 538 return ABIArgInfo::getIndirect(0); 539 } 540 541 // Treat an enum type as its underlying type. 542 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 543 RetTy = EnumTy->getDecl()->getIntegerType(); 544 545 return (RetTy->isPromotableIntegerType() ? 546 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 547} 548 549ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const { 550 if (!ByVal) 551 return ABIArgInfo::getIndirect(0, false); 552 553 // Compute the byval alignment. We trust the back-end to honor the 554 // minimum ABI alignment for byval, to make cleaner IR. 555 const unsigned MinABIAlign = 4; 556 unsigned Align = getContext().getTypeAlign(Ty) / 8; 557 if (Align > MinABIAlign) 558 return ABIArgInfo::getIndirect(Align); 559 return ABIArgInfo::getIndirect(0); 560} 561 562ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const { 563 // FIXME: Set alignment on indirect arguments. 564 if (CodeGenFunction::hasAggregateLLVMType(Ty)) { 565 // Structures with flexible arrays are always indirect. 566 if (const RecordType *RT = Ty->getAs<RecordType>()) { 567 // Structures with either a non-trivial destructor or a non-trivial 568 // copy constructor are always indirect. 569 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 570 return getIndirectResult(Ty, /*ByVal=*/false); 571 572 if (RT->getDecl()->hasFlexibleArrayMember()) 573 return getIndirectResult(Ty); 574 } 575 576 // Ignore empty structs. 
577 if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0) 578 return ABIArgInfo::getIgnore(); 579 580 // Expand small (<= 128-bit) record types when we know that the stack layout 581 // of those arguments will match the struct. This is important because the 582 // LLVM backend isn't smart enough to remove byval, which inhibits many 583 // optimizations. 584 if (getContext().getTypeSize(Ty) <= 4*32 && 585 canExpandIndirectArgument(Ty, getContext())) 586 return ABIArgInfo::getExpand(); 587 588 return getIndirectResult(Ty); 589 } 590 591 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 592 Ty = EnumTy->getDecl()->getIntegerType(); 593 594 return (Ty->isPromotableIntegerType() ? 595 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 596} 597 598llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 599 CodeGenFunction &CGF) const { 600 const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 601 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 602 603 CGBuilderTy &Builder = CGF.Builder; 604 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 605 "ap"); 606 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 607 llvm::Type *PTy = 608 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 609 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 610 611 uint64_t Offset = 612 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 613 llvm::Value *NextAddr = 614 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 615 "ap.next"); 616 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 617 618 return AddrTyped; 619} 620 621void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 622 llvm::GlobalValue *GV, 623 CodeGen::CodeGenModule &CGM) const { 624 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 625 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 626 // Get the LLVM function. 627 llvm::Function *Fn = cast<llvm::Function>(GV); 628 629 // Now add the 'alignstack' attribute with a value of 16. 630 Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16)); 631 } 632 } 633} 634 635bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 636 CodeGen::CodeGenFunction &CGF, 637 llvm::Value *Address) const { 638 CodeGen::CGBuilderTy &Builder = CGF.Builder; 639 llvm::LLVMContext &Context = CGF.getLLVMContext(); 640 641 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 642 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 643 644 // 0-7 are the eight integer registers; the order is different 645 // on Darwin (for EH), but the range is the same. 646 // 8 is %eip. 647 AssignToArrayRange(Builder, Address, Four8, 0, 8); 648 649 if (CGF.CGM.isTargetDarwin()) { 650 // 12-16 are st(0..4). Not sure why we stop at 4. 651 // These have size 16, which is sizeof(long double) on 652 // platforms with 8-byte alignment for that type. 653 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 654 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 655 656 } else { 657 // 9 is %eflags, which doesn't get a size on Darwin for some 658 // reason. 659 Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9)); 660 661 // 11-16 are st(0..5). Not sure why we stop at 5. 662 // These have size 12, which is sizeof(long double) on 663 // platforms with 4-byte alignment for that type. 
664 llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12); 665 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 666 } 667 668 return false; 669} 670 671//===----------------------------------------------------------------------===// 672// X86-64 ABI Implementation 673//===----------------------------------------------------------------------===// 674 675 676namespace { 677/// X86_64ABIInfo - The X86_64 ABI information. 678class X86_64ABIInfo : public ABIInfo { 679 enum Class { 680 Integer = 0, 681 SSE, 682 SSEUp, 683 X87, 684 X87Up, 685 ComplexX87, 686 NoClass, 687 Memory 688 }; 689 690 /// merge - Implement the X86_64 ABI merging algorithm. 691 /// 692 /// Merge an accumulating classification \arg Accum with a field 693 /// classification \arg Field. 694 /// 695 /// \param Accum - The accumulating classification. This should 696 /// always be either NoClass or the result of a previous merge 697 /// call. In addition, this should never be Memory (the caller 698 /// should just return Memory for the aggregate). 699 static Class merge(Class Accum, Class Field); 700 701 /// classify - Determine the x86_64 register classes in which the 702 /// given type T should be passed. 703 /// 704 /// \param Lo - The classification for the parts of the type 705 /// residing in the low word of the containing object. 706 /// 707 /// \param Hi - The classification for the parts of the type 708 /// residing in the high word of the containing object. 709 /// 710 /// \param OffsetBase - The bit offset of this type in the 711 /// containing object. Some parameters are classified different 712 /// depending on whether they straddle an eightbyte boundary. 713 /// 714 /// If a word is unused its result will be NoClass; if a type should 715 /// be passed in Memory then at least the classification of \arg Lo 716 /// will be Memory. 717 /// 718 /// The \arg Lo class will be NoClass iff the argument is ignored. 719 /// 720 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 721 /// also be ComplexX87. 722 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const; 723 724 const llvm::Type *Get8ByteTypeAtOffset(const llvm::Type *PrefType, 725 unsigned IROffset, 726 QualType SourceTy, 727 unsigned SourceOffset) const; 728 729 /// getCoerceResult - Given a source type \arg Ty and an LLVM type 730 /// to coerce to, chose the best way to pass Ty in the same place 731 /// that \arg CoerceTo would be passed, but while keeping the 732 /// emitted code as simple as possible. 733 /// 734 /// FIXME: Note, this should be cleaned up to just take an enumeration of all 735 /// the ways we might want to pass things, instead of constructing an LLVM 736 /// type. This makes this code more explicit, and it makes it clearer that we 737 /// are also doing this for correctness in the case of passing scalar types. 738 ABIArgInfo getCoerceResult(QualType Ty, 739 const llvm::Type *CoerceTo) const; 740 741 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 742 /// such that the argument will be returned in memory. 743 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 744 745 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 746 /// such that the argument will be passed in memory. 
747 ABIArgInfo getIndirectResult(QualType Ty) const; 748 749 ABIArgInfo classifyReturnType(QualType RetTy) const; 750 751 ABIArgInfo classifyArgumentType(QualType Ty, 752 unsigned &neededInt, 753 unsigned &neededSSE, 754 const llvm::Type *PrefType) const; 755 756public: 757 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 758 759 virtual void computeInfo(CGFunctionInfo &FI, 760 const llvm::Type *const *PrefTypes, 761 unsigned NumPrefTypes) const; 762 763 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 764 CodeGenFunction &CGF) const; 765}; 766 767class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 768public: 769 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 770 : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {} 771 772 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 773 return 7; 774 } 775 776 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 777 llvm::Value *Address) const { 778 CodeGen::CGBuilderTy &Builder = CGF.Builder; 779 llvm::LLVMContext &Context = CGF.getLLVMContext(); 780 781 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 782 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 783 784 // 0-15 are the 16 integer registers. 785 // 16 is %rip. 786 AssignToArrayRange(Builder, Address, Eight8, 0, 16); 787 788 return false; 789 } 790}; 791 792} 793 794X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 795 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 796 // classified recursively so that always two fields are 797 // considered. The resulting class is calculated according to 798 // the classes of the fields in the eightbyte: 799 // 800 // (a) If both classes are equal, this is the resulting class. 801 // 802 // (b) If one of the classes is NO_CLASS, the resulting class is 803 // the other class. 804 // 805 // (c) If one of the classes is MEMORY, the result is the MEMORY 806 // class. 807 // 808 // (d) If one of the classes is INTEGER, the result is the 809 // INTEGER. 810 // 811 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 812 // MEMORY is used as class. 813 // 814 // (f) Otherwise class SSE is used. 815 816 // Accum should never be memory (we should have returned) or 817 // ComplexX87 (because this cannot be passed in a structure). 818 assert((Accum != Memory && Accum != ComplexX87) && 819 "Invalid accumulated classification during merge."); 820 if (Accum == Field || Field == NoClass) 821 return Accum; 822 if (Field == Memory) 823 return Memory; 824 if (Accum == NoClass) 825 return Field; 826 if (Accum == Integer || Field == Integer) 827 return Integer; 828 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 829 Accum == X87 || Accum == X87Up) 830 return Memory; 831 return SSE; 832} 833 834void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 835 Class &Lo, Class &Hi) const { 836 // FIXME: This code can be simplified by introducing a simple value class for 837 // Class pairs with appropriate constructor methods for the various 838 // situations. 839 840 // FIXME: Some of the split computations are wrong; unaligned vectors 841 // shouldn't be passed in registers for example, so there is no chance they 842 // can straddle an eightbyte. Verify & simplify. 843 844 Lo = Hi = NoClass; 845 846 Class &Current = OffsetBase < 64 ? 
Lo : Hi; 847 Current = Memory; 848 849 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 850 BuiltinType::Kind k = BT->getKind(); 851 852 if (k == BuiltinType::Void) { 853 Current = NoClass; 854 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 855 Lo = Integer; 856 Hi = Integer; 857 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 858 Current = Integer; 859 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 860 Current = SSE; 861 } else if (k == BuiltinType::LongDouble) { 862 Lo = X87; 863 Hi = X87Up; 864 } 865 // FIXME: _Decimal32 and _Decimal64 are SSE. 866 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 867 return; 868 } 869 870 if (const EnumType *ET = Ty->getAs<EnumType>()) { 871 // Classify the underlying integer type. 872 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi); 873 return; 874 } 875 876 if (Ty->hasPointerRepresentation()) { 877 Current = Integer; 878 return; 879 } 880 881 if (Ty->isMemberPointerType()) { 882 if (Ty->isMemberFunctionPointerType()) 883 Lo = Hi = Integer; 884 else 885 Current = Integer; 886 return; 887 } 888 889 if (const VectorType *VT = Ty->getAs<VectorType>()) { 890 uint64_t Size = getContext().getTypeSize(VT); 891 if (Size == 32) { 892 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 893 // float> as integer. 894 Current = Integer; 895 896 // If this type crosses an eightbyte boundary, it should be 897 // split. 898 uint64_t EB_Real = (OffsetBase) / 64; 899 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 900 if (EB_Real != EB_Imag) 901 Hi = Lo; 902 } else if (Size == 64) { 903 // gcc passes <1 x double> in memory. :( 904 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 905 return; 906 907 // gcc passes <1 x long long> as INTEGER. 908 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong)) 909 Current = Integer; 910 else 911 Current = SSE; 912 913 // If this type crosses an eightbyte boundary, it should be 914 // split. 915 if (OffsetBase && OffsetBase != 64) 916 Hi = Lo; 917 } else if (Size == 128) { 918 Lo = SSE; 919 Hi = SSEUp; 920 } 921 return; 922 } 923 924 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 925 QualType ET = getContext().getCanonicalType(CT->getElementType()); 926 927 uint64_t Size = getContext().getTypeSize(Ty); 928 if (ET->isIntegralOrEnumerationType()) { 929 if (Size <= 64) 930 Current = Integer; 931 else if (Size <= 128) 932 Lo = Hi = Integer; 933 } else if (ET == getContext().FloatTy) 934 Current = SSE; 935 else if (ET == getContext().DoubleTy) 936 Lo = Hi = SSE; 937 else if (ET == getContext().LongDoubleTy) 938 Current = ComplexX87; 939 940 // If this complex type crosses an eightbyte boundary then it 941 // should be split. 942 uint64_t EB_Real = (OffsetBase) / 64; 943 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 944 if (Hi == NoClass && EB_Real != EB_Imag) 945 Hi = Lo; 946 947 return; 948 } 949 950 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 951 // Arrays are treated like structures. 952 953 uint64_t Size = getContext().getTypeSize(Ty); 954 955 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 956 // than two eightbytes, ..., it has class MEMORY. 957 if (Size > 128) 958 return; 959 960 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 961 // fields, it has class MEMORY. 962 // 963 // Only need to check alignment of array base. 
964 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 965 return; 966 967 // Otherwise implement simplified merge. We could be smarter about 968 // this, but it isn't worth it and would be harder to verify. 969 Current = NoClass; 970 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 971 uint64_t ArraySize = AT->getSize().getZExtValue(); 972 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 973 Class FieldLo, FieldHi; 974 classify(AT->getElementType(), Offset, FieldLo, FieldHi); 975 Lo = merge(Lo, FieldLo); 976 Hi = merge(Hi, FieldHi); 977 if (Lo == Memory || Hi == Memory) 978 break; 979 } 980 981 // Do post merger cleanup (see below). Only case we worry about is Memory. 982 if (Hi == Memory) 983 Lo = Memory; 984 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 985 return; 986 } 987 988 if (const RecordType *RT = Ty->getAs<RecordType>()) { 989 uint64_t Size = getContext().getTypeSize(Ty); 990 991 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 992 // than two eightbytes, ..., it has class MEMORY. 993 if (Size > 128) 994 return; 995 996 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 997 // copy constructor or a non-trivial destructor, it is passed by invisible 998 // reference. 999 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 1000 return; 1001 1002 const RecordDecl *RD = RT->getDecl(); 1003 1004 // Assume variable sized types are passed in memory. 1005 if (RD->hasFlexibleArrayMember()) 1006 return; 1007 1008 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1009 1010 // Reset Lo class, this will be recomputed. 1011 Current = NoClass; 1012 1013 // If this is a C++ record, classify the bases first. 1014 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1015 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1016 e = CXXRD->bases_end(); i != e; ++i) { 1017 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1018 "Unexpected base class!"); 1019 const CXXRecordDecl *Base = 1020 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1021 1022 // Classify this field. 1023 // 1024 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1025 // single eightbyte, each is classified separately. Each eightbyte gets 1026 // initialized to class NO_CLASS. 1027 Class FieldLo, FieldHi; 1028 uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base); 1029 classify(i->getType(), Offset, FieldLo, FieldHi); 1030 Lo = merge(Lo, FieldLo); 1031 Hi = merge(Hi, FieldHi); 1032 if (Lo == Memory || Hi == Memory) 1033 break; 1034 } 1035 1036 // If this record has no fields but isn't empty, classify as INTEGER. 1037 if (RD->field_empty() && Size) 1038 Current = Integer; 1039 } 1040 1041 // Classify the fields one at a time, merging the results. 1042 unsigned idx = 0; 1043 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1044 i != e; ++i, ++idx) { 1045 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1046 bool BitField = i->isBitField(); 1047 1048 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1049 // fields, it has class MEMORY. 1050 // 1051 // Note, skip this test for bit-fields, see below. 1052 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 1053 Lo = Memory; 1054 return; 1055 } 1056 1057 // Classify this field. 1058 // 1059 // AMD64-ABI 3.2.3p2: Rule 3. 
If the size of the aggregate 1060 // exceeds a single eightbyte, each is classified 1061 // separately. Each eightbyte gets initialized to class 1062 // NO_CLASS. 1063 Class FieldLo, FieldHi; 1064 1065 // Bit-fields require special handling, they do not force the 1066 // structure to be passed in memory even if unaligned, and 1067 // therefore they can straddle an eightbyte. 1068 if (BitField) { 1069 // Ignore padding bit-fields. 1070 if (i->isUnnamedBitfield()) 1071 continue; 1072 1073 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1074 uint64_t Size = 1075 i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue(); 1076 1077 uint64_t EB_Lo = Offset / 64; 1078 uint64_t EB_Hi = (Offset + Size - 1) / 64; 1079 FieldLo = FieldHi = NoClass; 1080 if (EB_Lo) { 1081 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 1082 FieldLo = NoClass; 1083 FieldHi = Integer; 1084 } else { 1085 FieldLo = Integer; 1086 FieldHi = EB_Hi ? Integer : NoClass; 1087 } 1088 } else 1089 classify(i->getType(), Offset, FieldLo, FieldHi); 1090 Lo = merge(Lo, FieldLo); 1091 Hi = merge(Hi, FieldHi); 1092 if (Lo == Memory || Hi == Memory) 1093 break; 1094 } 1095 1096 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 1097 // 1098 // (a) If one of the classes is MEMORY, the whole argument is 1099 // passed in memory. 1100 // 1101 // (b) If SSEUP is not preceeded by SSE, it is converted to SSE. 1102 1103 // The first of these conditions is guaranteed by how we implement 1104 // the merge (just bail). 1105 // 1106 // The second condition occurs in the case of unions; for example 1107 // union { _Complex double; unsigned; }. 1108 if (Hi == Memory) 1109 Lo = Memory; 1110 if (Hi == SSEUp && Lo != SSE) 1111 Hi = SSE; 1112 } 1113} 1114 1115ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty, 1116 const llvm::Type *CoerceTo) const { 1117 // If this is a pointer passed as a pointer, just pass it directly. 1118 if ((isa<llvm::PointerType>(CoerceTo) || CoerceTo->isIntegerTy(64)) && 1119 Ty->hasPointerRepresentation()) 1120 return ABIArgInfo::getExtend(); 1121 1122 if (isa<llvm::IntegerType>(CoerceTo)) { 1123 // Integer and pointer types will end up in a general purpose 1124 // register. 1125 1126 // Treat an enum type as its underlying type. 1127 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1128 Ty = EnumTy->getDecl()->getIntegerType(); 1129 1130 if (Ty->isIntegralOrEnumerationType()) 1131 return (Ty->isPromotableIntegerType() ? 1132 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1133 1134 } else if (CoerceTo->isDoubleTy()) { 1135 assert(Ty.isCanonical() && "should always have a canonical type here"); 1136 assert(!Ty.hasQualifiers() && "should never have a qualified type here"); 1137 1138 // Float and double end up in a single SSE reg. 1139 if (Ty == getContext().FloatTy || Ty == getContext().DoubleTy) 1140 return ABIArgInfo::getDirect(); 1141 1142 // If this is a 32-bit structure that is passed as a double, then it will be 1143 // passed in the low 32-bits of the XMM register, which is the same as how a 1144 // float is passed. Coerce to a float instead of a double. 1145 if (getContext().getTypeSizeInChars(Ty).getQuantity() == 4) 1146 CoerceTo = llvm::Type::getFloatTy(CoerceTo->getContext()); 1147 } 1148 1149 return ABIArgInfo::getCoerce(CoerceTo); 1150} 1151 1152ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 1153 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1154 // place naturally. 
1155 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { 1156 // Treat an enum type as its underlying type. 1157 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1158 Ty = EnumTy->getDecl()->getIntegerType(); 1159 1160 return (Ty->isPromotableIntegerType() ? 1161 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1162 } 1163 1164 return ABIArgInfo::getIndirect(0); 1165} 1166 1167ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const { 1168 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1169 // place naturally. 1170 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { 1171 // Treat an enum type as its underlying type. 1172 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1173 Ty = EnumTy->getDecl()->getIntegerType(); 1174 1175 return (Ty->isPromotableIntegerType() ? 1176 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1177 } 1178 1179 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1180 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 1181 1182 // Compute the byval alignment. We trust the back-end to honor the 1183 // minimum ABI alignment for byval, to make cleaner IR. 1184 const unsigned MinABIAlign = 8; 1185 unsigned Align = getContext().getTypeAlign(Ty) / 8; 1186 if (Align > MinABIAlign) 1187 return ABIArgInfo::getIndirect(Align); 1188 return ABIArgInfo::getIndirect(0); 1189} 1190 1191/// Get8ByteTypeAtOffset - The ABI specifies that a value should be passed in an 1192/// 8-byte GPR. This means that we either have a scalar or we are talking about 1193/// the high or low part of an up-to-16-byte struct. This routine picks the 1194/// best LLVM IR type to represent this, which may be i64 or may be anything 1195/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1196/// etc). 1197/// 1198/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1199/// the source type. IROffset is an offset in bytes into the LLVM IR type that 1200/// the 8-byte value references. PrefType may be null. 1201/// 1202/// SourceTy is the source level type for the entire argument. SourceOffset is 1203/// an offset into this that we're processing (which is always either 0 or 8). 1204/// 1205const llvm::Type *X86_64ABIInfo:: 1206Get8ByteTypeAtOffset(const llvm::Type *PrefType, unsigned IROffset, 1207 QualType SourceTy, unsigned SourceOffset) const { 1208 // Pointers are always 8-bytes at offset 0. 1209 if (IROffset == 0 && PrefType && isa<llvm::PointerType>(PrefType)) 1210 return PrefType; 1211 1212 // TODO: 1/2/4/8 byte integers are also interesting, but we have to know that 1213 // the "hole" is not used in the containing struct (just undef padding). 1214 1215 if (const llvm::StructType *STy = 1216 dyn_cast_or_null<llvm::StructType>(PrefType)) { 1217 // If this is a struct, recurse into the field at the specified offset. 1218 const llvm::StructLayout *SL = getTargetData().getStructLayout(STy); 1219 if (IROffset < SL->getSizeInBytes()) { 1220 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1221 IROffset -= SL->getElementOffset(FieldIdx); 1222 1223 return Get8ByteTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1224 SourceTy, SourceOffset); 1225 } 1226 } 1227 1228 // Okay, we don't have any better idea of what to pass, so we pass this in an 1229 // integer register that isn't too big to fit the rest of the struct. 
1230 uint64_t TySizeInBytes = 1231 getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1232 1233 // It is always safe to classify this as an integer type up to i64 that 1234 // isn't larger than the structure. 1235 switch (unsigned(TySizeInBytes-SourceOffset)) { 1236 case 1: return llvm::Type::getInt8Ty(getVMContext()); 1237 case 2: return llvm::Type::getInt16Ty(getVMContext()); 1238 case 3: 1239 case 4: return llvm::Type::getInt32Ty(getVMContext()); 1240 default: return llvm::Type::getInt64Ty(getVMContext()); 1241 } 1242} 1243 1244ABIArgInfo X86_64ABIInfo:: 1245classifyReturnType(QualType RetTy) const { 1246 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 1247 // classification algorithm. 1248 X86_64ABIInfo::Class Lo, Hi; 1249 classify(RetTy, 0, Lo, Hi); 1250 1251 // Check some invariants. 1252 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1253 assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification."); 1254 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1255 1256 const llvm::Type *ResType = 0; 1257 switch (Lo) { 1258 case NoClass: 1259 return ABIArgInfo::getIgnore(); 1260 1261 case SSEUp: 1262 case X87Up: 1263 assert(0 && "Invalid classification for lo word."); 1264 1265 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 1266 // hidden argument. 1267 case Memory: 1268 return getIndirectReturnResult(RetTy); 1269 1270 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 1271 // available register of the sequence %rax, %rdx is used. 1272 case Integer: 1273 ResType = Get8ByteTypeAtOffset(0, 0, RetTy, 0); 1274 break; 1275 1276 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 1277 // available SSE register of the sequence %xmm0, %xmm1 is used. 1278 case SSE: 1279 ResType = llvm::Type::getDoubleTy(getVMContext()); 1280 break; 1281 1282 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 1283 // returned on the X87 stack in %st0 as 80-bit x87 number. 1284 case X87: 1285 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 1286 break; 1287 1288 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 1289 // part of the value is returned in %st0 and the imaginary part in 1290 // %st1. 1291 case ComplexX87: 1292 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 1293 ResType = llvm::StructType::get(getVMContext(), 1294 llvm::Type::getX86_FP80Ty(getVMContext()), 1295 llvm::Type::getX86_FP80Ty(getVMContext()), 1296 NULL); 1297 break; 1298 } 1299 1300 switch (Hi) { 1301 // Memory was handled previously and X87 should 1302 // never occur as a hi class. 1303 case Memory: 1304 case X87: 1305 assert(0 && "Invalid classification for hi word."); 1306 1307 case ComplexX87: // Previously handled. 1308 case NoClass: 1309 break; 1310 1311 case Integer: { 1312 const llvm::Type *HiType = Get8ByteTypeAtOffset(0, 8, RetTy, 8); 1313 ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL); 1314 break; 1315 } 1316 case SSE: 1317 ResType = llvm::StructType::get(getVMContext(), ResType, 1318 llvm::Type::getDoubleTy(getVMContext()), 1319 NULL); 1320 break; 1321 1322 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 1323 // is passed in the upper half of the last used SSE register. 1324 // 1325 // SSEUP should always be preceeded by SSE, just widen. 
1326 case SSEUp: 1327 assert(Lo == SSE && "Unexpected SSEUp classification."); 1328 ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1329 break; 1330 1331 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 1332 // returned together with the previous X87 value in %st0. 1333 case X87Up: 1334 // If X87Up is preceeded by X87, we don't need to do 1335 // anything. However, in some cases with unions it may not be 1336 // preceeded by X87. In such situations we follow gcc and pass the 1337 // extra bits in an SSE reg. 1338 if (Lo != X87) 1339 ResType = llvm::StructType::get(getVMContext(), ResType, 1340 llvm::Type::getDoubleTy(getVMContext()), 1341 NULL); 1342 break; 1343 } 1344 1345 return getCoerceResult(RetTy, ResType); 1346} 1347 1348ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, 1349 unsigned &neededSSE, 1350 const llvm::Type *PrefType)const{ 1351 X86_64ABIInfo::Class Lo, Hi; 1352 classify(Ty, 0, Lo, Hi); 1353 1354 // Check some invariants. 1355 // FIXME: Enforce these by construction. 1356 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1357 assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification."); 1358 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1359 1360 neededInt = 0; 1361 neededSSE = 0; 1362 const llvm::Type *ResType = 0; 1363 switch (Lo) { 1364 case NoClass: 1365 return ABIArgInfo::getIgnore(); 1366 1367 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 1368 // on the stack. 1369 case Memory: 1370 1371 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 1372 // COMPLEX_X87, it is passed in memory. 1373 case X87: 1374 case ComplexX87: 1375 return getIndirectResult(Ty); 1376 1377 case SSEUp: 1378 case X87Up: 1379 assert(0 && "Invalid classification for lo word."); 1380 1381 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 1382 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 1383 // and %r9 is used. 1384 case Integer: 1385 ++neededInt; 1386 1387 // Pick an 8-byte type based on the preferred type. 1388 ResType = Get8ByteTypeAtOffset(PrefType, 0, Ty, 0); 1389 break; 1390 1391 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 1392 // available SSE register is used, the registers are taken in the 1393 // order from %xmm0 to %xmm7. 1394 case SSE: 1395 ++neededSSE; 1396 ResType = llvm::Type::getDoubleTy(getVMContext()); 1397 break; 1398 } 1399 1400 switch (Hi) { 1401 // Memory was handled previously, ComplexX87 and X87 should 1402 // never occur as hi classes, and X87Up must be preceed by X87, 1403 // which is passed in memory. 1404 case Memory: 1405 case X87: 1406 case ComplexX87: 1407 assert(0 && "Invalid classification for hi word."); 1408 break; 1409 1410 case NoClass: break; 1411 1412 case Integer: { 1413 ++neededInt; 1414 1415 // Pick an 8-byte type based on the preferred type. 1416 const llvm::Type *HiType = Get8ByteTypeAtOffset(PrefType, 8, Ty, 8); 1417 ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL); 1418 break; 1419 } 1420 1421 // X87Up generally doesn't occur here (long double is passed in 1422 // memory), except in situations involving unions. 1423 case X87Up: 1424 case SSE: 1425 ResType = llvm::StructType::get(getVMContext(), ResType, 1426 llvm::Type::getDoubleTy(getVMContext()), 1427 NULL); 1428 ++neededSSE; 1429 break; 1430 1431 // AMD64-ABI 3.2.3p3: Rule 4. 
If the class is SSEUP, the 1432 // eightbyte is passed in the upper half of the last used SSE 1433 // register. This only happens when 128-bit vectors are passed. 1434 case SSEUp: 1435 assert(Lo == SSE && "Unexpected SSEUp classification"); 1436 ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1437 1438 // If the preferred type is a 16-byte vector, prefer to pass it. 1439 if (const llvm::VectorType *VT = 1440 dyn_cast_or_null<llvm::VectorType>(PrefType)) { 1441 const llvm::Type *EltTy = VT->getElementType(); 1442 if (VT->getBitWidth() == 128 && 1443 (EltTy->isFloatTy() || EltTy->isDoubleTy() || 1444 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || 1445 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || 1446 EltTy->isIntegerTy(128))) 1447 ResType = PrefType; 1448 } 1449 break; 1450 } 1451 1452 return getCoerceResult(Ty, ResType); 1453} 1454 1455void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, 1456 const llvm::Type *const *PrefTypes, 1457 unsigned NumPrefTypes) const { 1458 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 1459 1460 // Keep track of the number of assigned registers. 1461 unsigned freeIntRegs = 6, freeSSERegs = 8; 1462 1463 // If the return value is indirect, then the hidden argument is consuming one 1464 // integer register. 1465 if (FI.getReturnInfo().isIndirect()) 1466 --freeIntRegs; 1467 1468 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 1469 // get assigned (in left-to-right order) for passing as follows... 1470 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1471 it != ie; ++it) { 1472 // If the client specified a preferred IR type to use, pass it down to 1473 // classifyArgumentType. 1474 const llvm::Type *PrefType = 0; 1475 if (NumPrefTypes) { 1476 PrefType = *PrefTypes++; 1477 --NumPrefTypes; 1478 } 1479 1480 unsigned neededInt, neededSSE; 1481 it->info = classifyArgumentType(it->type, neededInt, neededSSE, PrefType); 1482 1483 // AMD64-ABI 3.2.3p3: If there are no registers available for any 1484 // eightbyte of an argument, the whole argument is passed on the 1485 // stack. If registers have already been assigned for some 1486 // eightbytes of such an argument, the assignments get reverted. 1487 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 1488 freeIntRegs -= neededInt; 1489 freeSSERegs -= neededSSE; 1490 } else { 1491 it->info = getIndirectResult(it->type); 1492 } 1493 } 1494} 1495 1496static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 1497 QualType Ty, 1498 CodeGenFunction &CGF) { 1499 llvm::Value *overflow_arg_area_p = 1500 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 1501 llvm::Value *overflow_arg_area = 1502 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 1503 1504 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 1505 // byte boundary if alignment needed by type exceeds 8 byte boundary. 1506 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 1507 if (Align > 8) { 1508 // Note that we follow the ABI & gcc here, even though the type 1509 // could in theory have an alignment greater than 16. This case 1510 // shouldn't ever matter in practice. 
1511 1512 // overflow_arg_area = (overflow_arg_area + 15) & ~15; 1513 llvm::Value *Offset = 1514 llvm::ConstantInt::get(CGF.Int32Ty, 15); 1515 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 1516 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 1517 CGF.Int64Ty); 1518 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL); 1519 overflow_arg_area = 1520 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 1521 overflow_arg_area->getType(), 1522 "overflow_arg_area.align"); 1523 } 1524 1525 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 1526 const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 1527 llvm::Value *Res = 1528 CGF.Builder.CreateBitCast(overflow_arg_area, 1529 llvm::PointerType::getUnqual(LTy)); 1530 1531 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 1532 // l->overflow_arg_area + sizeof(type). 1533 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 1534 // an 8 byte boundary. 1535 1536 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 1537 llvm::Value *Offset = 1538 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 1539 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 1540 "overflow_arg_area.next"); 1541 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 1542 1543 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 1544 return Res; 1545} 1546 1547llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1548 CodeGenFunction &CGF) const { 1549 llvm::LLVMContext &VMContext = CGF.getLLVMContext(); 1550 1551 // Assume that va_list type is correct; should be pointer to LLVM type: 1552 // struct { 1553 // i32 gp_offset; 1554 // i32 fp_offset; 1555 // i8* overflow_arg_area; 1556 // i8* reg_save_area; 1557 // }; 1558 unsigned neededInt, neededSSE; 1559 1560 Ty = CGF.getContext().getCanonicalType(Ty); 1561 ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE, 0); 1562 1563 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 1564 // in the registers. If not go to step 7. 1565 if (!neededInt && !neededSSE) 1566 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 1567 1568 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 1569 // general purpose registers needed to pass type and num_fp to hold 1570 // the number of floating point registers needed. 1571 1572 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 1573 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 1574 // l->fp_offset > 304 - num_fp * 16 go to step 7. 1575 // 1576 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 1577 // register save space). 1578 1579 llvm::Value *InRegs = 0; 1580 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 1581 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 1582 if (neededInt) { 1583 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 1584 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 1585 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 1586 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 1587 } 1588 1589 if (neededSSE) { 1590 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 1591 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 1592 llvm::Value *FitsInFP = 1593 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 1594 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 1595 InRegs = InRegs ? 
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPointTy() ^ TyHi->isFloatingPointTy()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, so we need to collect the two eightbytes together.
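    // A typical case that reaches this path (illustrative): a _Complex double
    // argument, whose two eightbytes are both classified SSE and therefore
    // sit at fp_offset and fp_offset + 16 in the register save area.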
    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
    const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
    const llvm::Type *DblPtrTy =
      llvm::PointerType::getUnqual(DoubleTy);
    const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
                                                       DoubleTy, NULL);
    llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}


//===----------------------------------------------------------------------===//
// PIC16 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class PIC16ABIInfo : public ABIInfo {
public:
  PIC16ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PIC16TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PIC16ABIInfo(CGT)) {}
};

}

ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty) const {
  return ABIArgInfo::getDirect();
}

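// The va_arg lowering below just walks a byte pointer through the argument
// area: for example (illustrative), fetching a 4-byte value returns the
// current "ap.cur" pointer and stores "ap.cur" + 4 back as "ap.next".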
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset = CGF.getContext().getTypeSize(Ty) / 8;

  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
                        llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}


// PowerPC-32

namespace {
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output.  AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75: cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}
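// To make the encoding above concrete (illustrative): the first
// AssignToArrayRange call stores the byte value 4 into slots 0 through 31 of
// the register-size table at Address, recording that r0-r31 are each four
// bytes wide; the later calls do the same for the 8-byte FPRs and the
// 16-byte vector registers.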

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
    : TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 13;
  }
};

}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI,
                             const llvm::Type *const *PrefTypes,
                             unsigned NumPrefTypes) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);

  const llvm::Triple &Triple(getContext().Target.getTriple());
  llvm::CallingConv::ID DefaultCC;
  if (Triple.getEnvironmentName() == "gnueabi" ||
      Triple.getEnvironmentName() == "eabi")
    DefaultCC = llvm::CallingConv::ARM_AAPCS;
  else
    DefaultCC = llvm::CallingConv::ARM_APCS;

  switch (getABIKind()) {
  case APCS:
    if (DefaultCC != llvm::CallingConv::ARM_APCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
    break;

  case AAPCS:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
    break;

  case AAPCS_VFP:
    FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
    break;
  }
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // FIXME: This is kind of nasty... but there isn't much choice because the
  // ARM backend doesn't support byval.
  // FIXME: This doesn't handle alignment > 64 bits.
  const llvm::Type* ElemTy;
  unsigned SizeRegs;
  if (getContext().getTypeAlign(Ty) > 32) {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  } else {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  }
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
  const llvm::Type* STy = llvm::StructType::get(getVMContext(), LLVMFields,
                                                true);
  return ABIArgInfo::getCoerce(STy);
}
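// For example (illustrative): a 12-byte struct with 4-byte alignment is
// coerced to a packed struct wrapping [3 x i32], so it gets split across up
// to three core registers instead of being passed byval.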

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A
  // structure is called integer-like if its size is less than or equal to
  // one word, and the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, so we only need to verify they are
    // "integer like".  We still have to disallow a subsequent non-bitfield,
    // for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure.  This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
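// A couple of concrete cases (illustrative): "struct { int x; }" and
// "union { int a; char b; }" are integer-like, while "struct { short a, b; }"
// is not, because its second field sits at a non-zero offset.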

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (!CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(getVMContext(),
                                       getContext().getTypeSize(RetTy)));

    // Integer-like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0);
}

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment.
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
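// The va_arg step above always advances "ap" in whole 4-byte units: e.g.
// (illustrative) an 8-byte double moves it forward by 8, while a 2-byte
// aggregate still moves it by 4, so every argument occupies a whole number
// of 32-bit slots.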

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (CodeGenFunction::hasAggregateLLVMType(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public ABIInfo {
public:
  SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  bool isPromotableIntegerType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
};

}

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}


ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (CodeGenFunction::hasAggregateLLVMType(RetTy))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableIntegerType(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
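// In terms of the classification above (illustrative): 'short' and 'int'
// arguments and return values get getExtend(), 'long' and 'long long' fall
// through to getDirect(), and any aggregate is passed or returned indirectly.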

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const;
};

}

void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Add attributes goodness.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit ISR vector alias.
      unsigned Num = attr->getNumber() + 0xffe0;
      new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                            "vector_" +
                            llvm::LowercaseString(llvm::utohexstr(Num)),
                            GV, &M.getModule());
    }
  }
}
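// For a handler declared (illustrative) as
//   void __attribute__((interrupt(4))) isr(void);
// the steps above give it the MSP430_INTR calling convention, mark it
// noinline, and emit an alias named "vector_ffe4" (4 + 0xffe0, printed in
// lowercase hex) that points at the function.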

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation.  This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 29;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  // Everything on MIPS is 4 bytes.  Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(Builder, Address, Four8, 80, 181);

  return false;
}


const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
  // free it.

  const llvm::Triple &Triple = getContext().Target.getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    // FIXME: We want to know the float calling convention as well.
    if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
      return *(TheTargetCodeGenInfo =
               new ARMTargetCodeGenInfo(Types, ARMABIInfo::APCS));

    return *(TheTargetCodeGenInfo =
             new ARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS));

  case llvm::Triple::pic16:
    return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo(Types));

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::x86:
    switch (Triple.getOS()) {
    case llvm::Triple::Darwin:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, true, true));
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::MinGW64:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, false));
    }

  case llvm::Triple::x86_64:
    return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
  }
}
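// A few concrete mappings from the switch above (illustrative): an x86_64
// triple always gets X86_64TargetCodeGenInfo, an ARM triple whose target ABI
// string is "apcs-gnu" gets the APCS-flavoured ARMTargetCodeGenInfo, and any
// architecture not listed falls back to DefaultTargetCodeGenInfo.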