CGCall.cpp revision 8e03444e924665d4d90f5cfc0624c815256e0309
1//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// These classes wrap the information about a call or function 11// definition used to handle ABI compliancy. 12// 13//===----------------------------------------------------------------------===// 14 15#include "CGCall.h" 16#include "CodeGenFunction.h" 17#include "CodeGenModule.h" 18#include "clang/Basic/TargetInfo.h" 19#include "clang/AST/ASTContext.h" 20#include "clang/AST/Decl.h" 21#include "clang/AST/DeclCXX.h" 22#include "clang/AST/DeclObjC.h" 23#include "clang/AST/RecordLayout.h" 24#include "llvm/ADT/StringExtras.h" 25#include "llvm/Attributes.h" 26#include "llvm/Support/CallSite.h" 27#include "llvm/Support/CommandLine.h" 28#include "llvm/Support/MathExtras.h" 29#include "llvm/Support/raw_ostream.h" 30#include "llvm/Target/TargetData.h" 31 32#include "ABIInfo.h" 33 34using namespace clang; 35using namespace CodeGen; 36 37/***/ 38 39// FIXME: Use iterator and sidestep silly type array creation. 40 41const 42CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) { 43 return getFunctionInfo(FTNP->getResultType(), 44 llvm::SmallVector<QualType, 16>()); 45} 46 47const 48CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) { 49 llvm::SmallVector<QualType, 16> ArgTys; 50 // FIXME: Kill copy. 51 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i) 52 ArgTys.push_back(FTP->getArgType(i)); 53 return getFunctionInfo(FTP->getResultType(), ArgTys); 54} 55 56const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) { 57 llvm::SmallVector<QualType, 16> ArgTys; 58 // Add the 'this' pointer. 59 ArgTys.push_back(MD->getThisType(Context)); 60 61 const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType(); 62 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i) 63 ArgTys.push_back(FTP->getArgType(i)); 64 return getFunctionInfo(FTP->getResultType(), ArgTys); 65} 66 67const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) { 68 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { 69 if (MD->isInstance()) 70 return getFunctionInfo(MD); 71 } 72 73 const FunctionType *FTy = FD->getType()->getAsFunctionType(); 74 if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy)) 75 return getFunctionInfo(FTP); 76 return getFunctionInfo(cast<FunctionNoProtoType>(FTy)); 77} 78 79const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) { 80 llvm::SmallVector<QualType, 16> ArgTys; 81 ArgTys.push_back(MD->getSelfDecl()->getType()); 82 ArgTys.push_back(Context.getObjCSelType()); 83 // FIXME: Kill copy? 84 for (ObjCMethodDecl::param_iterator i = MD->param_begin(), 85 e = MD->param_end(); i != e; ++i) 86 ArgTys.push_back((*i)->getType()); 87 return getFunctionInfo(MD->getResultType(), ArgTys); 88} 89 90const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, 91 const CallArgList &Args) { 92 // FIXME: Kill copy. 93 llvm::SmallVector<QualType, 16> ArgTys; 94 for (CallArgList::const_iterator i = Args.begin(), e = Args.end(); 95 i != e; ++i) 96 ArgTys.push_back(i->second); 97 return getFunctionInfo(ResTy, ArgTys); 98} 99 100const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, 101 const FunctionArgList &Args) { 102 // FIXME: Kill copy. 103 llvm::SmallVector<QualType, 16> ArgTys; 104 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 105 i != e; ++i) 106 ArgTys.push_back(i->second); 107 return getFunctionInfo(ResTy, ArgTys); 108} 109 110const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, 111 const llvm::SmallVector<QualType, 16> &ArgTys) { 112 // Lookup or create unique function info. 113 llvm::FoldingSetNodeID ID; 114 CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end()); 115 116 void *InsertPos = 0; 117 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos); 118 if (FI) 119 return *FI; 120 121 // Construct the function info. 122 FI = new CGFunctionInfo(ResTy, ArgTys); 123 FunctionInfos.InsertNode(FI, InsertPos); 124 125 // Compute ABI information. 126 getABIInfo().computeInfo(*FI, getContext()); 127 128 return *FI; 129} 130 131/***/ 132 133ABIInfo::~ABIInfo() {} 134 135void ABIArgInfo::dump() const { 136 fprintf(stderr, "(ABIArgInfo Kind="); 137 switch (TheKind) { 138 case Direct: 139 fprintf(stderr, "Direct"); 140 break; 141 case Ignore: 142 fprintf(stderr, "Ignore"); 143 break; 144 case Coerce: 145 fprintf(stderr, "Coerce Type="); 146 getCoerceToType()->print(llvm::errs()); 147 break; 148 case Indirect: 149 fprintf(stderr, "Indirect Align=%d", getIndirectAlign()); 150 break; 151 case Expand: 152 fprintf(stderr, "Expand"); 153 break; 154 } 155 fprintf(stderr, ")\n"); 156} 157 158/***/ 159 160/// isEmptyRecord - Return true iff a structure has no non-empty 161/// members. Note that a structure with a flexible array member is not 162/// considered empty. 163static bool isEmptyRecord(ASTContext &Context, QualType T) { 164 const RecordType *RT = T->getAsRecordType(); 165 if (!RT) 166 return 0; 167 const RecordDecl *RD = RT->getDecl(); 168 if (RD->hasFlexibleArrayMember()) 169 return false; 170 for (RecordDecl::field_iterator i = RD->field_begin(Context), 171 e = RD->field_end(Context); i != e; ++i) { 172 const FieldDecl *FD = *i; 173 if (!isEmptyRecord(Context, FD->getType())) 174 return false; 175 } 176 return true; 177} 178 179/// isSingleElementStruct - Determine if a structure is a "single 180/// element struct", i.e. it has exactly one non-empty field or 181/// exactly one field which is itself a single element 182/// struct. Structures with flexible array members are never 183/// considered single element structs. 184/// 185/// \return The field declaration for the single non-empty field, if 186/// it exists. 187static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 188 const RecordType *RT = T->getAsStructureType(); 189 if (!RT) 190 return 0; 191 192 const RecordDecl *RD = RT->getDecl(); 193 if (RD->hasFlexibleArrayMember()) 194 return 0; 195 196 const Type *Found = 0; 197 for (RecordDecl::field_iterator i = RD->field_begin(Context), 198 e = RD->field_end(Context); i != e; ++i) { 199 const FieldDecl *FD = *i; 200 QualType FT = FD->getType(); 201 202 // Treat single element arrays as the element 203 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) 204 if (AT->getSize().getZExtValue() == 1) 205 FT = AT->getElementType(); 206 207 if (isEmptyRecord(Context, FT)) { 208 // Ignore 209 } else if (Found) { 210 return 0; 211 } else if (!CodeGenFunction::hasAggregateLLVMType(FT)) { 212 Found = FT.getTypePtr(); 213 } else { 214 Found = isSingleElementStruct(FT, Context); 215 if (!Found) 216 return 0; 217 } 218 } 219 220 return Found; 221} 222 223static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 224 if (!Ty->getAsBuiltinType() && !Ty->isPointerType()) 225 return false; 226 227 uint64_t Size = Context.getTypeSize(Ty); 228 return Size == 32 || Size == 64; 229} 230 231static bool areAllFields32Or64BitBasicType(const RecordDecl *RD, 232 ASTContext &Context) { 233 for (RecordDecl::field_iterator i = RD->field_begin(Context), 234 e = RD->field_end(Context); i != e; ++i) { 235 const FieldDecl *FD = *i; 236 237 if (!is32Or64BitBasicType(FD->getType(), Context)) 238 return false; 239 240 // FIXME: Reject bit-fields wholesale; there are two problems, we 241 // don't know how to expand them yet, and the predicate for 242 // telling if a bitfield still counts as "basic" is more 243 // complicated than what we were doing previously. 244 if (FD->isBitField()) 245 return false; 246 } 247 248 return true; 249} 250 251namespace { 252/// DefaultABIInfo - The default implementation for ABI specific 253/// details. This implementation provides information which results in 254/// self-consistent and sensible LLVM IR generation, but does not 255/// conform to any particular ABI. 256class DefaultABIInfo : public ABIInfo { 257 ABIArgInfo classifyReturnType(QualType RetTy, 258 ASTContext &Context) const; 259 260 ABIArgInfo classifyArgumentType(QualType RetTy, 261 ASTContext &Context) const; 262 263 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { 264 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); 265 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 266 it != ie; ++it) 267 it->info = classifyArgumentType(it->type, Context); 268 } 269 270 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 271 CodeGenFunction &CGF) const; 272}; 273 274/// X86_32ABIInfo - The X86-32 ABI information. 275class X86_32ABIInfo : public ABIInfo { 276 ASTContext &Context; 277 bool IsDarwin; 278 279 static bool isRegisterSize(unsigned Size) { 280 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 281 } 282 283 static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context); 284 285public: 286 ABIArgInfo classifyReturnType(QualType RetTy, 287 ASTContext &Context) const; 288 289 ABIArgInfo classifyArgumentType(QualType RetTy, 290 ASTContext &Context) const; 291 292 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { 293 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); 294 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 295 it != ie; ++it) 296 it->info = classifyArgumentType(it->type, Context); 297 } 298 299 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 300 CodeGenFunction &CGF) const; 301 302 X86_32ABIInfo(ASTContext &Context, bool d) 303 : ABIInfo(), Context(Context), IsDarwin(d) {} 304}; 305} 306 307 308/// shouldReturnTypeInRegister - Determine if the given type should be 309/// passed in a register (for the Darwin ABI). 310bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 311 ASTContext &Context) { 312 uint64_t Size = Context.getTypeSize(Ty); 313 314 // Type must be register sized. 315 if (!isRegisterSize(Size)) 316 return false; 317 318 if (Ty->isVectorType()) { 319 // 64- and 128- bit vectors inside structures are not returned in 320 // registers. 321 if (Size == 64 || Size == 128) 322 return false; 323 324 return true; 325 } 326 327 // If this is a builtin, pointer, or complex type, it is ok. 328 if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType()) 329 return true; 330 331 // Arrays are treated like records. 332 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 333 return shouldReturnTypeInRegister(AT->getElementType(), Context); 334 335 // Otherwise, it must be a record type. 336 const RecordType *RT = Ty->getAsRecordType(); 337 if (!RT) return false; 338 339 // Structure types are passed in register if all fields would be 340 // passed in a register. 341 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context), 342 e = RT->getDecl()->field_end(Context); i != e; ++i) { 343 const FieldDecl *FD = *i; 344 345 // FIXME: Reject bit-fields wholesale for now; this is incorrect. 346 if (FD->isBitField()) 347 return false; 348 349 // Empty structures are ignored. 350 if (isEmptyRecord(Context, FD->getType())) 351 continue; 352 353 // Check fields recursively. 354 if (!shouldReturnTypeInRegister(FD->getType(), Context)) 355 return false; 356 } 357 358 return true; 359} 360 361ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 362 ASTContext &Context) const { 363 if (RetTy->isVoidType()) { 364 return ABIArgInfo::getIgnore(); 365 } else if (const VectorType *VT = RetTy->getAsVectorType()) { 366 // On Darwin, some vectors are returned in registers. 367 if (IsDarwin) { 368 uint64_t Size = Context.getTypeSize(RetTy); 369 370 // 128-bit vectors are a special case; they are returned in 371 // registers and we need to make sure to pick a type the LLVM 372 // backend will like. 373 if (Size == 128) 374 return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty, 375 2)); 376 377 // Always return in register if it fits in a general purpose 378 // register, or if it is 64 bits and has a single element. 379 if ((Size == 8 || Size == 16 || Size == 32) || 380 (Size == 64 && VT->getNumElements() == 1)) 381 return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); 382 383 return ABIArgInfo::getIndirect(0); 384 } 385 386 return ABIArgInfo::getDirect(); 387 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { 388 // Structures with flexible arrays are always indirect. 389 if (const RecordType *RT = RetTy->getAsStructureType()) 390 if (RT->getDecl()->hasFlexibleArrayMember()) 391 return ABIArgInfo::getIndirect(0); 392 393 // Outside of Darwin, structs and unions are always indirect. 394 if (!IsDarwin && !RetTy->isAnyComplexType()) 395 return ABIArgInfo::getIndirect(0); 396 397 // Classify "single element" structs as their element type. 398 if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) { 399 if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) { 400 // FIXME: This is gross, it would be nice if we could just 401 // pass back SeltTy and have clients deal with it. Is it worth 402 // supporting coerce to both LLVM and clang Types? 403 if (BT->isIntegerType()) { 404 uint64_t Size = Context.getTypeSize(SeltTy); 405 return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size)); 406 } else if (BT->getKind() == BuiltinType::Float) { 407 return ABIArgInfo::getCoerce(llvm::Type::FloatTy); 408 } else if (BT->getKind() == BuiltinType::Double) { 409 return ABIArgInfo::getCoerce(llvm::Type::DoubleTy); 410 } 411 } else if (SeltTy->isPointerType()) { 412 // FIXME: It would be really nice if this could come out as 413 // the proper pointer type. 414 llvm::Type *PtrTy = 415 llvm::PointerType::getUnqual(llvm::Type::Int8Ty); 416 return ABIArgInfo::getCoerce(PtrTy); 417 } else if (SeltTy->isVectorType()) { 418 // 64- and 128-bit vectors are never returned in a 419 // register when inside a structure. 420 uint64_t Size = Context.getTypeSize(RetTy); 421 if (Size == 64 || Size == 128) 422 return ABIArgInfo::getIndirect(0); 423 424 return classifyReturnType(QualType(SeltTy, 0), Context); 425 } 426 } 427 428 uint64_t Size = Context.getTypeSize(RetTy); 429 if (isRegisterSize(Size)) { 430 // Always return in register for unions for now. 431 // FIXME: This is wrong, but better than treating as a 432 // structure. 433 if (RetTy->isUnionType()) 434 return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); 435 436 // Small structures which are register sized are generally returned 437 // in a register. 438 if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) 439 return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); 440 } 441 442 return ABIArgInfo::getIndirect(0); 443 } else { 444 return ABIArgInfo::getDirect(); 445 } 446} 447 448ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 449 ASTContext &Context) const { 450 // FIXME: Set alignment on indirect arguments. 451 if (CodeGenFunction::hasAggregateLLVMType(Ty)) { 452 // Structures with flexible arrays are always indirect. 453 if (const RecordType *RT = Ty->getAsStructureType()) 454 if (RT->getDecl()->hasFlexibleArrayMember()) 455 return ABIArgInfo::getIndirect(0); 456 457 // Ignore empty structs. 458 uint64_t Size = Context.getTypeSize(Ty); 459 if (Ty->isStructureType() && Size == 0) 460 return ABIArgInfo::getIgnore(); 461 462 // Expand structs with size <= 128-bits which consist only of 463 // basic types (int, long long, float, double, xxx*). This is 464 // non-recursive and does not ignore empty fields. 465 if (const RecordType *RT = Ty->getAsStructureType()) { 466 if (Context.getTypeSize(Ty) <= 4*32 && 467 areAllFields32Or64BitBasicType(RT->getDecl(), Context)) 468 return ABIArgInfo::getExpand(); 469 } 470 471 return ABIArgInfo::getIndirect(0); 472 } else { 473 return ABIArgInfo::getDirect(); 474 } 475} 476 477llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 478 CodeGenFunction &CGF) const { 479 const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); 480 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 481 482 CGBuilderTy &Builder = CGF.Builder; 483 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 484 "ap"); 485 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 486 llvm::Type *PTy = 487 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 488 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 489 490 uint64_t Offset = 491 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 492 llvm::Value *NextAddr = 493 Builder.CreateGEP(Addr, 494 llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset), 495 "ap.next"); 496 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 497 498 return AddrTyped; 499} 500 501namespace { 502/// X86_64ABIInfo - The X86_64 ABI information. 503class X86_64ABIInfo : public ABIInfo { 504 enum Class { 505 Integer = 0, 506 SSE, 507 SSEUp, 508 X87, 509 X87Up, 510 ComplexX87, 511 NoClass, 512 Memory 513 }; 514 515 /// merge - Implement the X86_64 ABI merging algorithm. 516 /// 517 /// Merge an accumulating classification \arg Accum with a field 518 /// classification \arg Field. 519 /// 520 /// \param Accum - The accumulating classification. This should 521 /// always be either NoClass or the result of a previous merge 522 /// call. In addition, this should never be Memory (the caller 523 /// should just return Memory for the aggregate). 524 Class merge(Class Accum, Class Field) const; 525 526 /// classify - Determine the x86_64 register classes in which the 527 /// given type T should be passed. 528 /// 529 /// \param Lo - The classification for the parts of the type 530 /// residing in the low word of the containing object. 531 /// 532 /// \param Hi - The classification for the parts of the type 533 /// residing in the high word of the containing object. 534 /// 535 /// \param OffsetBase - The bit offset of this type in the 536 /// containing object. Some parameters are classified different 537 /// depending on whether they straddle an eightbyte boundary. 538 /// 539 /// If a word is unused its result will be NoClass; if a type should 540 /// be passed in Memory then at least the classification of \arg Lo 541 /// will be Memory. 542 /// 543 /// The \arg Lo class will be NoClass iff the argument is ignored. 544 /// 545 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 546 /// also be ComplexX87. 547 void classify(QualType T, ASTContext &Context, uint64_t OffsetBase, 548 Class &Lo, Class &Hi) const; 549 550 /// getCoerceResult - Given a source type \arg Ty and an LLVM type 551 /// to coerce to, chose the best way to pass Ty in the same place 552 /// that \arg CoerceTo would be passed, but while keeping the 553 /// emitted code as simple as possible. 554 /// 555 /// FIXME: Note, this should be cleaned up to just take an 556 /// enumeration of all the ways we might want to pass things, 557 /// instead of constructing an LLVM type. This makes this code more 558 /// explicit, and it makes it clearer that we are also doing this 559 /// for correctness in the case of passing scalar types. 560 ABIArgInfo getCoerceResult(QualType Ty, 561 const llvm::Type *CoerceTo, 562 ASTContext &Context) const; 563 564 ABIArgInfo classifyReturnType(QualType RetTy, 565 ASTContext &Context) const; 566 567 ABIArgInfo classifyArgumentType(QualType Ty, 568 ASTContext &Context, 569 unsigned &neededInt, 570 unsigned &neededSSE) const; 571 572public: 573 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const; 574 575 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 576 CodeGenFunction &CGF) const; 577}; 578} 579 580X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, 581 Class Field) const { 582 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 583 // classified recursively so that always two fields are 584 // considered. The resulting class is calculated according to 585 // the classes of the fields in the eightbyte: 586 // 587 // (a) If both classes are equal, this is the resulting class. 588 // 589 // (b) If one of the classes is NO_CLASS, the resulting class is 590 // the other class. 591 // 592 // (c) If one of the classes is MEMORY, the result is the MEMORY 593 // class. 594 // 595 // (d) If one of the classes is INTEGER, the result is the 596 // INTEGER. 597 // 598 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 599 // MEMORY is used as class. 600 // 601 // (f) Otherwise class SSE is used. 602 603 // Accum should never be memory (we should have returned) or 604 // ComplexX87 (because this cannot be passed in a structure). 605 assert((Accum != Memory && Accum != ComplexX87) && 606 "Invalid accumulated classification during merge."); 607 if (Accum == Field || Field == NoClass) 608 return Accum; 609 else if (Field == Memory) 610 return Memory; 611 else if (Accum == NoClass) 612 return Field; 613 else if (Accum == Integer || Field == Integer) 614 return Integer; 615 else if (Field == X87 || Field == X87Up || Field == ComplexX87) 616 return Memory; 617 else 618 return SSE; 619} 620 621void X86_64ABIInfo::classify(QualType Ty, 622 ASTContext &Context, 623 uint64_t OffsetBase, 624 Class &Lo, Class &Hi) const { 625 // FIXME: This code can be simplified by introducing a simple value 626 // class for Class pairs with appropriate constructor methods for 627 // the various situations. 628 629 // FIXME: Some of the split computations are wrong; unaligned 630 // vectors shouldn't be passed in registers for example, so there is 631 // no chance they can straddle an eightbyte. Verify & simplify. 632 633 Lo = Hi = NoClass; 634 635 Class &Current = OffsetBase < 64 ? Lo : Hi; 636 Current = Memory; 637 638 if (const BuiltinType *BT = Ty->getAsBuiltinType()) { 639 BuiltinType::Kind k = BT->getKind(); 640 641 if (k == BuiltinType::Void) { 642 Current = NoClass; 643 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 644 Current = Integer; 645 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 646 Current = SSE; 647 } else if (k == BuiltinType::LongDouble) { 648 Lo = X87; 649 Hi = X87Up; 650 } 651 // FIXME: _Decimal32 and _Decimal64 are SSE. 652 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 653 // FIXME: __int128 is (Integer, Integer). 654 } else if (const EnumType *ET = Ty->getAsEnumType()) { 655 // Classify the underlying integer type. 656 classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi); 657 } else if (Ty->hasPointerRepresentation()) { 658 Current = Integer; 659 } else if (const VectorType *VT = Ty->getAsVectorType()) { 660 uint64_t Size = Context.getTypeSize(VT); 661 if (Size == 32) { 662 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 663 // float> as integer. 664 Current = Integer; 665 666 // If this type crosses an eightbyte boundary, it should be 667 // split. 668 uint64_t EB_Real = (OffsetBase) / 64; 669 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 670 if (EB_Real != EB_Imag) 671 Hi = Lo; 672 } else if (Size == 64) { 673 // gcc passes <1 x double> in memory. :( 674 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 675 return; 676 677 // gcc passes <1 x long long> as INTEGER. 678 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong)) 679 Current = Integer; 680 else 681 Current = SSE; 682 683 // If this type crosses an eightbyte boundary, it should be 684 // split. 685 if (OffsetBase && OffsetBase != 64) 686 Hi = Lo; 687 } else if (Size == 128) { 688 Lo = SSE; 689 Hi = SSEUp; 690 } 691 } else if (const ComplexType *CT = Ty->getAsComplexType()) { 692 QualType ET = Context.getCanonicalType(CT->getElementType()); 693 694 uint64_t Size = Context.getTypeSize(Ty); 695 if (ET->isIntegralType()) { 696 if (Size <= 64) 697 Current = Integer; 698 else if (Size <= 128) 699 Lo = Hi = Integer; 700 } else if (ET == Context.FloatTy) 701 Current = SSE; 702 else if (ET == Context.DoubleTy) 703 Lo = Hi = SSE; 704 else if (ET == Context.LongDoubleTy) 705 Current = ComplexX87; 706 707 // If this complex type crosses an eightbyte boundary then it 708 // should be split. 709 uint64_t EB_Real = (OffsetBase) / 64; 710 uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64; 711 if (Hi == NoClass && EB_Real != EB_Imag) 712 Hi = Lo; 713 } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 714 // Arrays are treated like structures. 715 716 uint64_t Size = Context.getTypeSize(Ty); 717 718 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 719 // than two eightbytes, ..., it has class MEMORY. 720 if (Size > 128) 721 return; 722 723 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 724 // fields, it has class MEMORY. 725 // 726 // Only need to check alignment of array base. 727 if (OffsetBase % Context.getTypeAlign(AT->getElementType())) 728 return; 729 730 // Otherwise implement simplified merge. We could be smarter about 731 // this, but it isn't worth it and would be harder to verify. 732 Current = NoClass; 733 uint64_t EltSize = Context.getTypeSize(AT->getElementType()); 734 uint64_t ArraySize = AT->getSize().getZExtValue(); 735 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 736 Class FieldLo, FieldHi; 737 classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi); 738 Lo = merge(Lo, FieldLo); 739 Hi = merge(Hi, FieldHi); 740 if (Lo == Memory || Hi == Memory) 741 break; 742 } 743 744 // Do post merger cleanup (see below). Only case we worry about is Memory. 745 if (Hi == Memory) 746 Lo = Memory; 747 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 748 } else if (const RecordType *RT = Ty->getAsRecordType()) { 749 uint64_t Size = Context.getTypeSize(Ty); 750 751 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 752 // than two eightbytes, ..., it has class MEMORY. 753 if (Size > 128) 754 return; 755 756 const RecordDecl *RD = RT->getDecl(); 757 758 // Assume variable sized types are passed in memory. 759 if (RD->hasFlexibleArrayMember()) 760 return; 761 762 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 763 764 // Reset Lo class, this will be recomputed. 765 Current = NoClass; 766 unsigned idx = 0; 767 for (RecordDecl::field_iterator i = RD->field_begin(Context), 768 e = RD->field_end(Context); i != e; ++i, ++idx) { 769 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 770 bool BitField = i->isBitField(); 771 772 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 773 // fields, it has class MEMORY. 774 // 775 // Note, skip this test for bit-fields, see below. 776 if (!BitField && Offset % Context.getTypeAlign(i->getType())) { 777 Lo = Memory; 778 return; 779 } 780 781 // Classify this field. 782 // 783 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 784 // exceeds a single eightbyte, each is classified 785 // separately. Each eightbyte gets initialized to class 786 // NO_CLASS. 787 Class FieldLo, FieldHi; 788 789 // Bit-fields require special handling, they do not force the 790 // structure to be passed in memory even if unaligned, and 791 // therefore they can straddle an eightbyte. 792 if (BitField) { 793 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 794 uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue(); 795 796 uint64_t EB_Lo = Offset / 64; 797 uint64_t EB_Hi = (Offset + Size - 1) / 64; 798 FieldLo = FieldHi = NoClass; 799 if (EB_Lo) { 800 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 801 FieldLo = NoClass; 802 FieldHi = Integer; 803 } else { 804 FieldLo = Integer; 805 FieldHi = EB_Hi ? Integer : NoClass; 806 } 807 } else 808 classify(i->getType(), Context, Offset, FieldLo, FieldHi); 809 Lo = merge(Lo, FieldLo); 810 Hi = merge(Hi, FieldHi); 811 if (Lo == Memory || Hi == Memory) 812 break; 813 } 814 815 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 816 // 817 // (a) If one of the classes is MEMORY, the whole argument is 818 // passed in memory. 819 // 820 // (b) If SSEUP is not preceeded by SSE, it is converted to SSE. 821 822 // The first of these conditions is guaranteed by how we implement 823 // the merge (just bail). 824 // 825 // The second condition occurs in the case of unions; for example 826 // union { _Complex double; unsigned; }. 827 if (Hi == Memory) 828 Lo = Memory; 829 if (Hi == SSEUp && Lo != SSE) 830 Hi = SSE; 831 } 832} 833 834ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty, 835 const llvm::Type *CoerceTo, 836 ASTContext &Context) const { 837 if (CoerceTo == llvm::Type::Int64Ty) { 838 // Integer and pointer types will end up in a general purpose 839 // register. 840 if (Ty->isIntegralType() || Ty->isPointerType()) 841 return ABIArgInfo::getDirect(); 842 843 } else if (CoerceTo == llvm::Type::DoubleTy) { 844 // FIXME: It would probably be better to make CGFunctionInfo only 845 // map using canonical types than to canonize here. 846 QualType CTy = Context.getCanonicalType(Ty); 847 848 // Float and double end up in a single SSE reg. 849 if (CTy == Context.FloatTy || CTy == Context.DoubleTy) 850 return ABIArgInfo::getDirect(); 851 852 } 853 854 return ABIArgInfo::getCoerce(CoerceTo); 855} 856 857ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, 858 ASTContext &Context) const { 859 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 860 // classification algorithm. 861 X86_64ABIInfo::Class Lo, Hi; 862 classify(RetTy, Context, 0, Lo, Hi); 863 864 // Check some invariants. 865 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 866 assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification."); 867 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 868 869 const llvm::Type *ResType = 0; 870 switch (Lo) { 871 case NoClass: 872 return ABIArgInfo::getIgnore(); 873 874 case SSEUp: 875 case X87Up: 876 assert(0 && "Invalid classification for lo word."); 877 878 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 879 // hidden argument. 880 case Memory: 881 return ABIArgInfo::getIndirect(0); 882 883 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 884 // available register of the sequence %rax, %rdx is used. 885 case Integer: 886 ResType = llvm::Type::Int64Ty; break; 887 888 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 889 // available SSE register of the sequence %xmm0, %xmm1 is used. 890 case SSE: 891 ResType = llvm::Type::DoubleTy; break; 892 893 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 894 // returned on the X87 stack in %st0 as 80-bit x87 number. 895 case X87: 896 ResType = llvm::Type::X86_FP80Ty; break; 897 898 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 899 // part of the value is returned in %st0 and the imaginary part in 900 // %st1. 901 case ComplexX87: 902 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 903 ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty, 904 llvm::Type::X86_FP80Ty, 905 NULL); 906 break; 907 } 908 909 switch (Hi) { 910 // Memory was handled previously and X87 should 911 // never occur as a hi class. 912 case Memory: 913 case X87: 914 assert(0 && "Invalid classification for hi word."); 915 916 case ComplexX87: // Previously handled. 917 case NoClass: break; 918 919 case Integer: 920 ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL); 921 break; 922 case SSE: 923 ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); 924 break; 925 926 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 927 // is passed in the upper half of the last used SSE register. 928 // 929 // SSEUP should always be preceeded by SSE, just widen. 930 case SSEUp: 931 assert(Lo == SSE && "Unexpected SSEUp classification."); 932 ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2); 933 break; 934 935 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 936 // returned together with the previous X87 value in %st0. 937 case X87Up: 938 // If X87Up is preceeded by X87, we don't need to do 939 // anything. However, in some cases with unions it may not be 940 // preceeded by X87. In such situations we follow gcc and pass the 941 // extra bits in an SSE reg. 942 if (Lo != X87) 943 ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); 944 break; 945 } 946 947 return getCoerceResult(RetTy, ResType, Context); 948} 949 950ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, 951 unsigned &neededInt, 952 unsigned &neededSSE) const { 953 X86_64ABIInfo::Class Lo, Hi; 954 classify(Ty, Context, 0, Lo, Hi); 955 956 // Check some invariants. 957 // FIXME: Enforce these by construction. 958 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 959 assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification."); 960 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 961 962 neededInt = 0; 963 neededSSE = 0; 964 const llvm::Type *ResType = 0; 965 switch (Lo) { 966 case NoClass: 967 return ABIArgInfo::getIgnore(); 968 969 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 970 // on the stack. 971 case Memory: 972 973 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 974 // COMPLEX_X87, it is passed in memory. 975 case X87: 976 case ComplexX87: 977 return ABIArgInfo::getIndirect(0); 978 979 case SSEUp: 980 case X87Up: 981 assert(0 && "Invalid classification for lo word."); 982 983 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 984 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 985 // and %r9 is used. 986 case Integer: 987 ++neededInt; 988 ResType = llvm::Type::Int64Ty; 989 break; 990 991 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 992 // available SSE register is used, the registers are taken in the 993 // order from %xmm0 to %xmm7. 994 case SSE: 995 ++neededSSE; 996 ResType = llvm::Type::DoubleTy; 997 break; 998 } 999 1000 switch (Hi) { 1001 // Memory was handled previously, ComplexX87 and X87 should 1002 // never occur as hi classes, and X87Up must be preceed by X87, 1003 // which is passed in memory. 1004 case Memory: 1005 case X87: 1006 case ComplexX87: 1007 assert(0 && "Invalid classification for hi word."); 1008 break; 1009 1010 case NoClass: break; 1011 case Integer: 1012 ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL); 1013 ++neededInt; 1014 break; 1015 1016 // X87Up generally doesn't occur here (long double is passed in 1017 // memory), except in situations involving unions. 1018 case X87Up: 1019 case SSE: 1020 ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); 1021 ++neededSSE; 1022 break; 1023 1024 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 1025 // eightbyte is passed in the upper half of the last used SSE 1026 // register. 1027 case SSEUp: 1028 assert(Lo == SSE && "Unexpected SSEUp classification."); 1029 ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2); 1030 break; 1031 } 1032 1033 return getCoerceResult(Ty, ResType, Context); 1034} 1035 1036void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { 1037 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); 1038 1039 // Keep track of the number of assigned registers. 1040 unsigned freeIntRegs = 6, freeSSERegs = 8; 1041 1042 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 1043 // get assigned (in left-to-right order) for passing as follows... 1044 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1045 it != ie; ++it) { 1046 unsigned neededInt, neededSSE; 1047 it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE); 1048 1049 // AMD64-ABI 3.2.3p3: If there are no registers available for any 1050 // eightbyte of an argument, the whole argument is passed on the 1051 // stack. If registers have already been assigned for some 1052 // eightbytes of such an argument, the assignments get reverted. 1053 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 1054 freeIntRegs -= neededInt; 1055 freeSSERegs -= neededSSE; 1056 } else { 1057 it->info = ABIArgInfo::getIndirect(0); 1058 } 1059 } 1060} 1061 1062static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 1063 QualType Ty, 1064 CodeGenFunction &CGF) { 1065 llvm::Value *overflow_arg_area_p = 1066 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 1067 llvm::Value *overflow_arg_area = 1068 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 1069 1070 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 1071 // byte boundary if alignment needed by type exceeds 8 byte boundary. 1072 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 1073 if (Align > 8) { 1074 // Note that we follow the ABI & gcc here, even though the type 1075 // could in theory have an alignment greater than 16. This case 1076 // shouldn't ever matter in practice. 1077 1078 // overflow_arg_area = (overflow_arg_area + 15) & ~15; 1079 llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15); 1080 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 1081 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 1082 llvm::Type::Int64Ty); 1083 llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL); 1084 overflow_arg_area = 1085 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 1086 overflow_arg_area->getType(), 1087 "overflow_arg_area.align"); 1088 } 1089 1090 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 1091 const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 1092 llvm::Value *Res = 1093 CGF.Builder.CreateBitCast(overflow_arg_area, 1094 llvm::PointerType::getUnqual(LTy)); 1095 1096 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 1097 // l->overflow_arg_area + sizeof(type). 1098 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 1099 // an 8 byte boundary. 1100 1101 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 1102 llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1103 (SizeInBytes + 7) & ~7); 1104 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 1105 "overflow_arg_area.next"); 1106 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 1107 1108 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 1109 return Res; 1110} 1111 1112llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1113 CodeGenFunction &CGF) const { 1114 // Assume that va_list type is correct; should be pointer to LLVM type: 1115 // struct { 1116 // i32 gp_offset; 1117 // i32 fp_offset; 1118 // i8* overflow_arg_area; 1119 // i8* reg_save_area; 1120 // }; 1121 unsigned neededInt, neededSSE; 1122 ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), 1123 neededInt, neededSSE); 1124 1125 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 1126 // in the registers. If not go to step 7. 1127 if (!neededInt && !neededSSE) 1128 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 1129 1130 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 1131 // general purpose registers needed to pass type and num_fp to hold 1132 // the number of floating point registers needed. 1133 1134 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 1135 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 1136 // l->fp_offset > 304 - num_fp * 16 go to step 7. 1137 // 1138 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 1139 // register save space). 1140 1141 llvm::Value *InRegs = 0; 1142 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 1143 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 1144 if (neededInt) { 1145 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 1146 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 1147 InRegs = 1148 CGF.Builder.CreateICmpULE(gp_offset, 1149 llvm::ConstantInt::get(llvm::Type::Int32Ty, 1150 48 - neededInt * 8), 1151 "fits_in_gp"); 1152 } 1153 1154 if (neededSSE) { 1155 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 1156 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 1157 llvm::Value *FitsInFP = 1158 CGF.Builder.CreateICmpULE(fp_offset, 1159 llvm::ConstantInt::get(llvm::Type::Int32Ty, 1160 176 - neededSSE * 16), 1161 "fits_in_fp"); 1162 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 1163 } 1164 1165 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 1166 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 1167 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 1168 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 1169 1170 // Emit code to load the value if it was passed in registers. 1171 1172 CGF.EmitBlock(InRegBlock); 1173 1174 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 1175 // an offset of l->gp_offset and/or l->fp_offset. This may require 1176 // copying to a temporary location in case the parameter is passed 1177 // in different register classes or requires an alignment greater 1178 // than 8 for general purpose registers and 16 for XMM registers. 1179 // 1180 // FIXME: This really results in shameful code when we end up 1181 // needing to collect arguments from different places; often what 1182 // should result in a simple assembling of a structure from 1183 // scattered addresses has many more loads than necessary. Can we 1184 // clean this up? 1185 const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 1186 llvm::Value *RegAddr = 1187 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 1188 "reg_save_area"); 1189 if (neededInt && neededSSE) { 1190 // FIXME: Cleanup. 1191 assert(AI.isCoerce() && "Unexpected ABI info for mixed regs"); 1192 const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 1193 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 1194 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 1195 const llvm::Type *TyLo = ST->getElementType(0); 1196 const llvm::Type *TyHi = ST->getElementType(1); 1197 assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) && 1198 "Unexpected ABI info for mixed regs"); 1199 const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 1200 const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 1201 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 1202 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 1203 llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr; 1204 llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr; 1205 llvm::Value *V = 1206 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 1207 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 1208 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 1209 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 1210 1211 RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy)); 1212 } else if (neededInt) { 1213 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 1214 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 1215 llvm::PointerType::getUnqual(LTy)); 1216 } else { 1217 if (neededSSE == 1) { 1218 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 1219 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 1220 llvm::PointerType::getUnqual(LTy)); 1221 } else { 1222 assert(neededSSE == 2 && "Invalid number of needed registers!"); 1223 // SSE registers are spaced 16 bytes apart in the register save 1224 // area, we need to collect the two eightbytes together. 1225 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 1226 llvm::Value *RegAddrHi = 1227 CGF.Builder.CreateGEP(RegAddrLo, 1228 llvm::ConstantInt::get(llvm::Type::Int32Ty, 16)); 1229 const llvm::Type *DblPtrTy = 1230 llvm::PointerType::getUnqual(llvm::Type::DoubleTy); 1231 const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy, 1232 llvm::Type::DoubleTy, 1233 NULL); 1234 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 1235 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 1236 DblPtrTy)); 1237 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 1238 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 1239 DblPtrTy)); 1240 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 1241 RegAddr = CGF.Builder.CreateBitCast(Tmp, 1242 llvm::PointerType::getUnqual(LTy)); 1243 } 1244 } 1245 1246 // AMD64-ABI 3.5.7p5: Step 5. Set: 1247 // l->gp_offset = l->gp_offset + num_gp * 8 1248 // l->fp_offset = l->fp_offset + num_fp * 16. 1249 if (neededInt) { 1250 llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1251 neededInt * 8); 1252 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 1253 gp_offset_p); 1254 } 1255 if (neededSSE) { 1256 llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1257 neededSSE * 16); 1258 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 1259 fp_offset_p); 1260 } 1261 CGF.EmitBranch(ContBlock); 1262 1263 // Emit code to load the value if it was passed in memory. 1264 1265 CGF.EmitBlock(InMemBlock); 1266 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 1267 1268 // Return the appropriate result. 1269 1270 CGF.EmitBlock(ContBlock); 1271 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 1272 "vaarg.addr"); 1273 ResAddr->reserveOperandSpace(2); 1274 ResAddr->addIncoming(RegAddr, InRegBlock); 1275 ResAddr->addIncoming(MemAddr, InMemBlock); 1276 1277 return ResAddr; 1278} 1279 1280// ABI Info for PIC16 1281class PIC16ABIInfo : public ABIInfo { 1282 ABIArgInfo classifyReturnType(QualType RetTy, 1283 ASTContext &Context) const; 1284 1285 ABIArgInfo classifyArgumentType(QualType RetTy, 1286 ASTContext &Context) const; 1287 1288 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { 1289 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); 1290 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1291 it != ie; ++it) 1292 it->info = classifyArgumentType(it->type, Context); 1293 } 1294 1295 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1296 CodeGenFunction &CGF) const; 1297 1298}; 1299 1300ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy, 1301 ASTContext &Context) const { 1302 if (RetTy->isVoidType()) { 1303 return ABIArgInfo::getIgnore(); 1304 } else { 1305 return ABIArgInfo::getDirect(); 1306 } 1307} 1308 1309ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty, 1310 ASTContext &Context) const { 1311 return ABIArgInfo::getDirect(); 1312} 1313 1314llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1315 CodeGenFunction &CGF) const { 1316 return 0; 1317} 1318 1319class ARMABIInfo : public ABIInfo { 1320 ABIArgInfo classifyReturnType(QualType RetTy, 1321 ASTContext &Context) const; 1322 1323 ABIArgInfo classifyArgumentType(QualType RetTy, 1324 ASTContext &Context) const; 1325 1326 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const; 1327 1328 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1329 CodeGenFunction &CGF) const; 1330}; 1331 1332void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { 1333 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); 1334 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1335 it != ie; ++it) { 1336 it->info = classifyArgumentType(it->type, Context); 1337 } 1338} 1339 1340ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, 1341 ASTContext &Context) const { 1342 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { 1343 return ABIArgInfo::getDirect(); 1344 } 1345 // FIXME: This is kind of nasty... but there isn't much choice 1346 // because the ARM backend doesn't support byval. 1347 // FIXME: This doesn't handle alignment > 64 bits. 1348 const llvm::Type* ElemTy; 1349 unsigned SizeRegs; 1350 if (Context.getTypeAlign(Ty) > 32) { 1351 ElemTy = llvm::Type::Int64Ty; 1352 SizeRegs = (Context.getTypeSize(Ty) + 63) / 64; 1353 } else { 1354 ElemTy = llvm::Type::Int32Ty; 1355 SizeRegs = (Context.getTypeSize(Ty) + 31) / 32; 1356 } 1357 std::vector<const llvm::Type*> LLVMFields; 1358 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs)); 1359 const llvm::Type* STy = llvm::StructType::get(LLVMFields, true); 1360 return ABIArgInfo::getCoerce(STy); 1361} 1362 1363ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, 1364 ASTContext &Context) const { 1365 if (RetTy->isVoidType()) { 1366 return ABIArgInfo::getIgnore(); 1367 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { 1368 // Aggregates <= 4 bytes are returned in r0; other aggregates 1369 // are returned indirectly. 1370 uint64_t Size = Context.getTypeSize(RetTy); 1371 if (Size <= 32) 1372 return ABIArgInfo::getCoerce(llvm::Type::Int32Ty); 1373 return ABIArgInfo::getIndirect(0); 1374 } else { 1375 return ABIArgInfo::getDirect(); 1376 } 1377} 1378 1379llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1380 CodeGenFunction &CGF) const { 1381 // FIXME: Need to handle alignment 1382 const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); 1383 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 1384 1385 CGBuilderTy &Builder = CGF.Builder; 1386 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 1387 "ap"); 1388 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 1389 llvm::Type *PTy = 1390 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 1391 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 1392 1393 uint64_t Offset = 1394 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 1395 llvm::Value *NextAddr = 1396 Builder.CreateGEP(Addr, 1397 llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset), 1398 "ap.next"); 1399 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 1400 1401 return AddrTyped; 1402} 1403 1404ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy, 1405 ASTContext &Context) const { 1406 if (RetTy->isVoidType()) { 1407 return ABIArgInfo::getIgnore(); 1408 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { 1409 return ABIArgInfo::getIndirect(0); 1410 } else { 1411 return ABIArgInfo::getDirect(); 1412 } 1413} 1414 1415ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty, 1416 ASTContext &Context) const { 1417 if (CodeGenFunction::hasAggregateLLVMType(Ty)) { 1418 return ABIArgInfo::getIndirect(0); 1419 } else { 1420 return ABIArgInfo::getDirect(); 1421 } 1422} 1423 1424llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1425 CodeGenFunction &CGF) const { 1426 return 0; 1427} 1428 1429const ABIInfo &CodeGenTypes::getABIInfo() const { 1430 if (TheABIInfo) 1431 return *TheABIInfo; 1432 1433 // For now we just cache this in the CodeGenTypes and don't bother 1434 // to free it. 1435 const char *TargetPrefix = getContext().Target.getTargetPrefix(); 1436 if (strcmp(TargetPrefix, "x86") == 0) { 1437 bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin"); 1438 switch (getContext().Target.getPointerWidth(0)) { 1439 case 32: 1440 return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin)); 1441 case 64: 1442 return *(TheABIInfo = new X86_64ABIInfo()); 1443 } 1444 } else if (strcmp(TargetPrefix, "arm") == 0) { 1445 // FIXME: Support for OABI? 1446 return *(TheABIInfo = new ARMABIInfo()); 1447 } else if (strcmp(TargetPrefix, "pic16") == 0) { 1448 return *(TheABIInfo = new PIC16ABIInfo()); 1449 } 1450 1451 return *(TheABIInfo = new DefaultABIInfo); 1452} 1453 1454/***/ 1455 1456CGFunctionInfo::CGFunctionInfo(QualType ResTy, 1457 const llvm::SmallVector<QualType, 16> &ArgTys) { 1458 NumArgs = ArgTys.size(); 1459 Args = new ArgInfo[1 + NumArgs]; 1460 Args[0].type = ResTy; 1461 for (unsigned i = 0; i < NumArgs; ++i) 1462 Args[1 + i].type = ArgTys[i]; 1463} 1464 1465/***/ 1466 1467void CodeGenTypes::GetExpandedTypes(QualType Ty, 1468 std::vector<const llvm::Type*> &ArgTys) { 1469 const RecordType *RT = Ty->getAsStructureType(); 1470 assert(RT && "Can only expand structure types."); 1471 const RecordDecl *RD = RT->getDecl(); 1472 assert(!RD->hasFlexibleArrayMember() && 1473 "Cannot expand structure with flexible array."); 1474 1475 for (RecordDecl::field_iterator i = RD->field_begin(Context), 1476 e = RD->field_end(Context); i != e; ++i) { 1477 const FieldDecl *FD = *i; 1478 assert(!FD->isBitField() && 1479 "Cannot expand structure with bit-field members."); 1480 1481 QualType FT = FD->getType(); 1482 if (CodeGenFunction::hasAggregateLLVMType(FT)) { 1483 GetExpandedTypes(FT, ArgTys); 1484 } else { 1485 ArgTys.push_back(ConvertType(FT)); 1486 } 1487 } 1488} 1489 1490llvm::Function::arg_iterator 1491CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, 1492 llvm::Function::arg_iterator AI) { 1493 const RecordType *RT = Ty->getAsStructureType(); 1494 assert(RT && "Can only expand structure types."); 1495 1496 RecordDecl *RD = RT->getDecl(); 1497 assert(LV.isSimple() && 1498 "Unexpected non-simple lvalue during struct expansion."); 1499 llvm::Value *Addr = LV.getAddress(); 1500 for (RecordDecl::field_iterator i = RD->field_begin(getContext()), 1501 e = RD->field_end(getContext()); i != e; ++i) { 1502 FieldDecl *FD = *i; 1503 QualType FT = FD->getType(); 1504 1505 // FIXME: What are the right qualifiers here? 1506 LValue LV = EmitLValueForField(Addr, FD, false, 0); 1507 if (CodeGenFunction::hasAggregateLLVMType(FT)) { 1508 AI = ExpandTypeFromArgs(FT, LV, AI); 1509 } else { 1510 EmitStoreThroughLValue(RValue::get(AI), LV, FT); 1511 ++AI; 1512 } 1513 } 1514 1515 return AI; 1516} 1517 1518void 1519CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV, 1520 llvm::SmallVector<llvm::Value*, 16> &Args) { 1521 const RecordType *RT = Ty->getAsStructureType(); 1522 assert(RT && "Can only expand structure types."); 1523 1524 RecordDecl *RD = RT->getDecl(); 1525 assert(RV.isAggregate() && "Unexpected rvalue during struct expansion"); 1526 llvm::Value *Addr = RV.getAggregateAddr(); 1527 for (RecordDecl::field_iterator i = RD->field_begin(getContext()), 1528 e = RD->field_end(getContext()); i != e; ++i) { 1529 FieldDecl *FD = *i; 1530 QualType FT = FD->getType(); 1531 1532 // FIXME: What are the right qualifiers here? 1533 LValue LV = EmitLValueForField(Addr, FD, false, 0); 1534 if (CodeGenFunction::hasAggregateLLVMType(FT)) { 1535 ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args); 1536 } else { 1537 RValue RV = EmitLoadOfLValue(LV, FT); 1538 assert(RV.isScalar() && 1539 "Unexpected non-scalar rvalue during struct expansion."); 1540 Args.push_back(RV.getScalarVal()); 1541 } 1542 } 1543} 1544 1545/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as 1546/// a pointer to an object of type \arg Ty. 1547/// 1548/// This safely handles the case when the src type is smaller than the 1549/// destination type; in this situation the values of bits which not 1550/// present in the src are undefined. 1551static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, 1552 const llvm::Type *Ty, 1553 CodeGenFunction &CGF) { 1554 const llvm::Type *SrcTy = 1555 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 1556 uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy); 1557 uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty); 1558 1559 // If load is legal, just bitcast the src pointer. 1560 if (SrcSize == DstSize) { 1561 llvm::Value *Casted = 1562 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty)); 1563 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); 1564 // FIXME: Use better alignment / avoid requiring aligned load. 1565 Load->setAlignment(1); 1566 return Load; 1567 } else { 1568 assert(SrcSize < DstSize && "Coercion is losing source bits!"); 1569 1570 // Otherwise do coercion through memory. This is stupid, but 1571 // simple. 1572 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); 1573 llvm::Value *Casted = 1574 CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy)); 1575 llvm::StoreInst *Store = 1576 CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted); 1577 // FIXME: Use better alignment / avoid requiring aligned store. 1578 Store->setAlignment(1); 1579 return CGF.Builder.CreateLoad(Tmp); 1580 } 1581} 1582 1583/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, 1584/// where the source and destination may have different types. 1585/// 1586/// This safely handles the case when the src type is larger than the 1587/// destination type; the upper bits of the src will be lost. 1588static void CreateCoercedStore(llvm::Value *Src, 1589 llvm::Value *DstPtr, 1590 CodeGenFunction &CGF) { 1591 const llvm::Type *SrcTy = Src->getType(); 1592 const llvm::Type *DstTy = 1593 cast<llvm::PointerType>(DstPtr->getType())->getElementType(); 1594 1595 uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy); 1596 uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy); 1597 1598 // If store is legal, just bitcast the src pointer. 1599 if (SrcSize == DstSize) { 1600 llvm::Value *Casted = 1601 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy)); 1602 // FIXME: Use better alignment / avoid requiring aligned store. 1603 CGF.Builder.CreateStore(Src, Casted)->setAlignment(1); 1604 } else { 1605 assert(SrcSize > DstSize && "Coercion is missing bits!"); 1606 1607 // Otherwise do coercion through memory. This is stupid, but 1608 // simple. 1609 llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy); 1610 CGF.Builder.CreateStore(Src, Tmp); 1611 llvm::Value *Casted = 1612 CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy)); 1613 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); 1614 // FIXME: Use better alignment / avoid requiring aligned load. 1615 Load->setAlignment(1); 1616 CGF.Builder.CreateStore(Load, DstPtr); 1617 } 1618} 1619 1620/***/ 1621 1622bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) { 1623 return FI.getReturnInfo().isIndirect(); 1624} 1625 1626const llvm::FunctionType * 1627CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) { 1628 std::vector<const llvm::Type*> ArgTys; 1629 1630 const llvm::Type *ResultType = 0; 1631 1632 QualType RetTy = FI.getReturnType(); 1633 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1634 switch (RetAI.getKind()) { 1635 case ABIArgInfo::Expand: 1636 assert(0 && "Invalid ABI kind for return argument"); 1637 1638 case ABIArgInfo::Direct: 1639 ResultType = ConvertType(RetTy); 1640 break; 1641 1642 case ABIArgInfo::Indirect: { 1643 assert(!RetAI.getIndirectAlign() && "Align unused on indirect return."); 1644 ResultType = llvm::Type::VoidTy; 1645 const llvm::Type *STy = ConvertType(RetTy); 1646 ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace())); 1647 break; 1648 } 1649 1650 case ABIArgInfo::Ignore: 1651 ResultType = llvm::Type::VoidTy; 1652 break; 1653 1654 case ABIArgInfo::Coerce: 1655 ResultType = RetAI.getCoerceToType(); 1656 break; 1657 } 1658 1659 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1660 ie = FI.arg_end(); it != ie; ++it) { 1661 const ABIArgInfo &AI = it->info; 1662 1663 switch (AI.getKind()) { 1664 case ABIArgInfo::Ignore: 1665 break; 1666 1667 case ABIArgInfo::Coerce: 1668 ArgTys.push_back(AI.getCoerceToType()); 1669 break; 1670 1671 case ABIArgInfo::Indirect: { 1672 // indirect arguments are always on the stack, which is addr space #0. 1673 const llvm::Type *LTy = ConvertTypeForMem(it->type); 1674 ArgTys.push_back(llvm::PointerType::getUnqual(LTy)); 1675 break; 1676 } 1677 1678 case ABIArgInfo::Direct: 1679 ArgTys.push_back(ConvertType(it->type)); 1680 break; 1681 1682 case ABIArgInfo::Expand: 1683 GetExpandedTypes(it->type, ArgTys); 1684 break; 1685 } 1686 } 1687 1688 return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic); 1689} 1690 1691void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, 1692 const Decl *TargetDecl, 1693 AttributeListType &PAL) { 1694 unsigned FuncAttrs = 0; 1695 unsigned RetAttrs = 0; 1696 1697 // FIXME: handle sseregparm someday... 1698 if (TargetDecl) { 1699 if (TargetDecl->hasAttr<NoThrowAttr>()) 1700 FuncAttrs |= llvm::Attribute::NoUnwind; 1701 if (TargetDecl->hasAttr<NoReturnAttr>()) 1702 FuncAttrs |= llvm::Attribute::NoReturn; 1703 if (TargetDecl->hasAttr<ConstAttr>()) 1704 FuncAttrs |= llvm::Attribute::ReadNone; 1705 else if (TargetDecl->hasAttr<PureAttr>()) 1706 FuncAttrs |= llvm::Attribute::ReadOnly; 1707 } 1708 1709 QualType RetTy = FI.getReturnType(); 1710 unsigned Index = 1; 1711 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1712 switch (RetAI.getKind()) { 1713 case ABIArgInfo::Direct: 1714 if (RetTy->isPromotableIntegerType()) { 1715 if (RetTy->isSignedIntegerType()) { 1716 RetAttrs |= llvm::Attribute::SExt; 1717 } else if (RetTy->isUnsignedIntegerType()) { 1718 RetAttrs |= llvm::Attribute::ZExt; 1719 } 1720 } 1721 break; 1722 1723 case ABIArgInfo::Indirect: 1724 PAL.push_back(llvm::AttributeWithIndex::get(Index, 1725 llvm::Attribute::StructRet | 1726 llvm::Attribute::NoAlias)); 1727 ++Index; 1728 // sret disables readnone and readonly 1729 FuncAttrs &= ~(llvm::Attribute::ReadOnly | 1730 llvm::Attribute::ReadNone); 1731 break; 1732 1733 case ABIArgInfo::Ignore: 1734 case ABIArgInfo::Coerce: 1735 break; 1736 1737 case ABIArgInfo::Expand: 1738 assert(0 && "Invalid ABI kind for return argument"); 1739 } 1740 1741 if (RetAttrs) 1742 PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs)); 1743 1744 // FIXME: we need to honour command line settings also... 1745 // FIXME: RegParm should be reduced in case of nested functions and/or global 1746 // register variable. 1747 signed RegParm = 0; 1748 if (TargetDecl) 1749 if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>()) 1750 RegParm = RegParmAttr->getNumParams(); 1751 1752 unsigned PointerWidth = getContext().Target.getPointerWidth(0); 1753 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1754 ie = FI.arg_end(); it != ie; ++it) { 1755 QualType ParamType = it->type; 1756 const ABIArgInfo &AI = it->info; 1757 unsigned Attributes = 0; 1758 1759 switch (AI.getKind()) { 1760 case ABIArgInfo::Coerce: 1761 break; 1762 1763 case ABIArgInfo::Indirect: 1764 Attributes |= llvm::Attribute::ByVal; 1765 Attributes |= 1766 llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign()); 1767 // byval disables readnone and readonly. 1768 FuncAttrs &= ~(llvm::Attribute::ReadOnly | 1769 llvm::Attribute::ReadNone); 1770 break; 1771 1772 case ABIArgInfo::Direct: 1773 if (ParamType->isPromotableIntegerType()) { 1774 if (ParamType->isSignedIntegerType()) { 1775 Attributes |= llvm::Attribute::SExt; 1776 } else if (ParamType->isUnsignedIntegerType()) { 1777 Attributes |= llvm::Attribute::ZExt; 1778 } 1779 } 1780 if (RegParm > 0 && 1781 (ParamType->isIntegerType() || ParamType->isPointerType())) { 1782 RegParm -= 1783 (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth; 1784 if (RegParm >= 0) 1785 Attributes |= llvm::Attribute::InReg; 1786 } 1787 // FIXME: handle sseregparm someday... 1788 break; 1789 1790 case ABIArgInfo::Ignore: 1791 // Skip increment, no matching LLVM parameter. 1792 continue; 1793 1794 case ABIArgInfo::Expand: { 1795 std::vector<const llvm::Type*> Tys; 1796 // FIXME: This is rather inefficient. Do we ever actually need 1797 // to do anything here? The result should be just reconstructed 1798 // on the other side, so extension should be a non-issue. 1799 getTypes().GetExpandedTypes(ParamType, Tys); 1800 Index += Tys.size(); 1801 continue; 1802 } 1803 } 1804 1805 if (Attributes) 1806 PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes)); 1807 ++Index; 1808 } 1809 if (FuncAttrs) 1810 PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs)); 1811} 1812 1813void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 1814 llvm::Function *Fn, 1815 const FunctionArgList &Args) { 1816 // FIXME: We no longer need the types from FunctionArgList; lift up 1817 // and simplify. 1818 1819 // Emit allocs for param decls. Give the LLVM Argument nodes names. 1820 llvm::Function::arg_iterator AI = Fn->arg_begin(); 1821 1822 // Name the struct return argument. 1823 if (CGM.ReturnTypeUsesSret(FI)) { 1824 AI->setName("agg.result"); 1825 ++AI; 1826 } 1827 1828 assert(FI.arg_size() == Args.size() && 1829 "Mismatch between function signature & arguments."); 1830 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 1831 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 1832 i != e; ++i, ++info_it) { 1833 const VarDecl *Arg = i->first; 1834 QualType Ty = info_it->type; 1835 const ABIArgInfo &ArgI = info_it->info; 1836 1837 switch (ArgI.getKind()) { 1838 case ABIArgInfo::Indirect: { 1839 llvm::Value* V = AI; 1840 if (hasAggregateLLVMType(Ty)) { 1841 // Do nothing, aggregates and complex variables are accessed by 1842 // reference. 1843 } else { 1844 // Load scalar value from indirect argument. 1845 V = EmitLoadOfScalar(V, false, Ty); 1846 if (!getContext().typesAreCompatible(Ty, Arg->getType())) { 1847 // This must be a promotion, for something like 1848 // "void a(x) short x; {..." 1849 V = EmitScalarConversion(V, Ty, Arg->getType()); 1850 } 1851 } 1852 EmitParmDecl(*Arg, V); 1853 break; 1854 } 1855 1856 case ABIArgInfo::Direct: { 1857 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1858 llvm::Value* V = AI; 1859 if (hasAggregateLLVMType(Ty)) { 1860 // Create a temporary alloca to hold the argument; the rest of 1861 // codegen expects to access aggregates & complex values by 1862 // reference. 1863 V = CreateTempAlloca(ConvertTypeForMem(Ty)); 1864 Builder.CreateStore(AI, V); 1865 } else { 1866 if (!getContext().typesAreCompatible(Ty, Arg->getType())) { 1867 // This must be a promotion, for something like 1868 // "void a(x) short x; {..." 1869 V = EmitScalarConversion(V, Ty, Arg->getType()); 1870 } 1871 } 1872 EmitParmDecl(*Arg, V); 1873 break; 1874 } 1875 1876 case ABIArgInfo::Expand: { 1877 // If this structure was expanded into multiple arguments then 1878 // we need to create a temporary and reconstruct it from the 1879 // arguments. 1880 std::string Name = Arg->getNameAsString(); 1881 llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty), 1882 (Name + ".addr").c_str()); 1883 // FIXME: What are the right qualifiers here? 1884 llvm::Function::arg_iterator End = 1885 ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI); 1886 EmitParmDecl(*Arg, Temp); 1887 1888 // Name the arguments used in expansion and increment AI. 1889 unsigned Index = 0; 1890 for (; AI != End; ++AI, ++Index) 1891 AI->setName(Name + "." + llvm::utostr(Index)); 1892 continue; 1893 } 1894 1895 case ABIArgInfo::Ignore: 1896 // Initialize the local variable appropriately. 1897 if (hasAggregateLLVMType(Ty)) { 1898 EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty))); 1899 } else { 1900 EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType()))); 1901 } 1902 1903 // Skip increment, no matching LLVM parameter. 1904 continue; 1905 1906 case ABIArgInfo::Coerce: { 1907 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1908 // FIXME: This is very wasteful; EmitParmDecl is just going to 1909 // drop the result in a new alloca anyway, so we could just 1910 // store into that directly if we broke the abstraction down 1911 // more. 1912 llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce"); 1913 CreateCoercedStore(AI, V, *this); 1914 // Match to what EmitParmDecl is expecting for this type. 1915 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { 1916 V = EmitLoadOfScalar(V, false, Ty); 1917 if (!getContext().typesAreCompatible(Ty, Arg->getType())) { 1918 // This must be a promotion, for something like 1919 // "void a(x) short x; {..." 1920 V = EmitScalarConversion(V, Ty, Arg->getType()); 1921 } 1922 } 1923 EmitParmDecl(*Arg, V); 1924 break; 1925 } 1926 } 1927 1928 ++AI; 1929 } 1930 assert(AI == Fn->arg_end() && "Argument mismatch!"); 1931} 1932 1933void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, 1934 llvm::Value *ReturnValue) { 1935 llvm::Value *RV = 0; 1936 1937 // Functions with no result always return void. 1938 if (ReturnValue) { 1939 QualType RetTy = FI.getReturnType(); 1940 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1941 1942 switch (RetAI.getKind()) { 1943 case ABIArgInfo::Indirect: 1944 if (RetTy->isAnyComplexType()) { 1945 ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false); 1946 StoreComplexToAddr(RT, CurFn->arg_begin(), false); 1947 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { 1948 EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy); 1949 } else { 1950 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(), 1951 false); 1952 } 1953 break; 1954 1955 case ABIArgInfo::Direct: 1956 // The internal return value temp always will have 1957 // pointer-to-return-type type. 1958 RV = Builder.CreateLoad(ReturnValue); 1959 break; 1960 1961 case ABIArgInfo::Ignore: 1962 break; 1963 1964 case ABIArgInfo::Coerce: 1965 RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this); 1966 break; 1967 1968 case ABIArgInfo::Expand: 1969 assert(0 && "Invalid ABI kind for return argument"); 1970 } 1971 } 1972 1973 if (RV) { 1974 Builder.CreateRet(RV); 1975 } else { 1976 Builder.CreateRetVoid(); 1977 } 1978} 1979 1980RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) { 1981 return EmitAnyExprToTemp(E); 1982} 1983 1984RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 1985 llvm::Value *Callee, 1986 const CallArgList &CallArgs, 1987 const Decl *TargetDecl) { 1988 // FIXME: We no longer need the types from CallArgs; lift up and 1989 // simplify. 1990 llvm::SmallVector<llvm::Value*, 16> Args; 1991 1992 // Handle struct-return functions by passing a pointer to the 1993 // location that we would like to return into. 1994 QualType RetTy = CallInfo.getReturnType(); 1995 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 1996 if (CGM.ReturnTypeUsesSret(CallInfo)) { 1997 // Create a temporary alloca to hold the result of the call. :( 1998 Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy))); 1999 } 2000 2001 assert(CallInfo.arg_size() == CallArgs.size() && 2002 "Mismatch between function signature & arguments."); 2003 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 2004 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 2005 I != E; ++I, ++info_it) { 2006 const ABIArgInfo &ArgInfo = info_it->info; 2007 RValue RV = I->first; 2008 2009 switch (ArgInfo.getKind()) { 2010 case ABIArgInfo::Indirect: 2011 if (RV.isScalar() || RV.isComplex()) { 2012 // Make a temporary alloca to pass the argument. 2013 Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second))); 2014 if (RV.isScalar()) 2015 EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false); 2016 else 2017 StoreComplexToAddr(RV.getComplexVal(), Args.back(), false); 2018 } else { 2019 Args.push_back(RV.getAggregateAddr()); 2020 } 2021 break; 2022 2023 case ABIArgInfo::Direct: 2024 if (RV.isScalar()) { 2025 Args.push_back(RV.getScalarVal()); 2026 } else if (RV.isComplex()) { 2027 llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second)); 2028 Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0); 2029 Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1); 2030 Args.push_back(Tmp); 2031 } else { 2032 Args.push_back(Builder.CreateLoad(RV.getAggregateAddr())); 2033 } 2034 break; 2035 2036 case ABIArgInfo::Ignore: 2037 break; 2038 2039 case ABIArgInfo::Coerce: { 2040 // FIXME: Avoid the conversion through memory if possible. 2041 llvm::Value *SrcPtr; 2042 if (RV.isScalar()) { 2043 SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce"); 2044 EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false); 2045 } else if (RV.isComplex()) { 2046 SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce"); 2047 StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false); 2048 } else 2049 SrcPtr = RV.getAggregateAddr(); 2050 Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), 2051 *this)); 2052 break; 2053 } 2054 2055 case ABIArgInfo::Expand: 2056 ExpandTypeToArgs(I->second, RV, Args); 2057 break; 2058 } 2059 } 2060 2061 llvm::BasicBlock *InvokeDest = getInvokeDest(); 2062 CodeGen::AttributeListType AttributeList; 2063 CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList); 2064 llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(), 2065 AttributeList.end()); 2066 2067 llvm::CallSite CS; 2068 if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) { 2069 CS = Builder.CreateCall(Callee, &Args[0], &Args[0]+Args.size()); 2070 } else { 2071 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 2072 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, 2073 &Args[0], &Args[0]+Args.size()); 2074 EmitBlock(Cont); 2075 } 2076 2077 CS.setAttributes(Attrs); 2078 if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee)) 2079 CS.setCallingConv(F->getCallingConv()); 2080 2081 // If the call doesn't return, finish the basic block and clear the 2082 // insertion point; this allows the rest of IRgen to discard 2083 // unreachable code. 2084 if (CS.doesNotReturn()) { 2085 Builder.CreateUnreachable(); 2086 Builder.ClearInsertionPoint(); 2087 2088 // FIXME: For now, emit a dummy basic block because expr 2089 // emitters in generally are not ready to handle emitting 2090 // expressions at unreachable points. 2091 EnsureInsertPoint(); 2092 2093 // Return a reasonable RValue. 2094 return GetUndefRValue(RetTy); 2095 } 2096 2097 llvm::Instruction *CI = CS.getInstruction(); 2098 if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy) 2099 CI->setName("call"); 2100 2101 switch (RetAI.getKind()) { 2102 case ABIArgInfo::Indirect: 2103 if (RetTy->isAnyComplexType()) 2104 return RValue::getComplex(LoadComplexFromAddr(Args[0], false)); 2105 if (CodeGenFunction::hasAggregateLLVMType(RetTy)) 2106 return RValue::getAggregate(Args[0]); 2107 return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy)); 2108 2109 case ABIArgInfo::Direct: 2110 if (RetTy->isAnyComplexType()) { 2111 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 2112 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 2113 return RValue::getComplex(std::make_pair(Real, Imag)); 2114 } 2115 if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { 2116 llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp"); 2117 Builder.CreateStore(CI, V); 2118 return RValue::getAggregate(V); 2119 } 2120 return RValue::get(CI); 2121 2122 case ABIArgInfo::Ignore: 2123 // If we are ignoring an argument that had a result, make sure to 2124 // construct the appropriate return value for our caller. 2125 return GetUndefRValue(RetTy); 2126 2127 case ABIArgInfo::Coerce: { 2128 // FIXME: Avoid the conversion through memory if possible. 2129 llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce"); 2130 CreateCoercedStore(CI, V, *this); 2131 if (RetTy->isAnyComplexType()) 2132 return RValue::getComplex(LoadComplexFromAddr(V, false)); 2133 if (CodeGenFunction::hasAggregateLLVMType(RetTy)) 2134 return RValue::getAggregate(V); 2135 return RValue::get(EmitLoadOfScalar(V, false, RetTy)); 2136 } 2137 2138 case ABIArgInfo::Expand: 2139 assert(0 && "Invalid ABI kind for return argument"); 2140 } 2141 2142 assert(0 && "Unhandled ABIArgInfo::Kind"); 2143 return RValue::get(0); 2144} 2145 2146/* VarArg handling */ 2147 2148llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) { 2149 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this); 2150} 2151