CGCall.cpp revision b11fa0d25d86169f0e0a29d5398116c0212bb787
//===----- CGCall.cpp - Encapsulate calling convention details -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>());
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // Add the 'this' pointer.
  ArgTys.push_back(MD->getThisType(Context));

  const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
    if (MD->isInstance())
      return getFunctionInfo(MD);
  }

  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
    return getFunctionInfo(FTP);
  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  ArgTys.push_back(MD->getSelfDecl()->getType());
  ArgTys.push_back(Context.getObjCSelType());
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}

/***/

ABIInfo::~ABIInfo() {}

void ABIArgInfo::dump() const {
  fprintf(stderr, "(ABIArgInfo Kind=");
  switch (TheKind) {
  case Direct:
    fprintf(stderr, "Direct");
    break;
  case Ignore:
    fprintf(stderr, "Ignore");
    break;
  case Coerce:
    fprintf(stderr, "Coerce Type=");
    getCoerceToType()->print(llvm::errs());
    break;
  case Indirect:
    fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
    break;
  case Expand:
    fprintf(stderr, "Expand");
    break;
  }
  fprintf(stderr, ")\n");
}

/***/

/// isEmptyRecord - Return true iff a structure has no non-empty
/// members. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T) {
  const RecordType *RT = T->getAsRecordType();
  if (!RT)
    return 0;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    if (!isEmptyRecord(Context, FD->getType()))
      return false;
  }
  return true;
}
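// Worked example (illustrative): given
//
//   struct Empty {};
//   struct S { struct Empty e; float f; };
//
// isEmptyRecord(S) is false, but isSingleElementStruct(S) returns the
// 'float' type: the empty member is ignored and 'f' is the only field
// left. The classifiers below use this to return such structs directly
// as their element type where the ABI allows it.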
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Treat single element arrays as the element
    if (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
      if (AT->getSize().getZExtValue() == 1)
        FT = AT->getElementType();

    if (isEmptyRecord(Context, FT)) {
      // Ignore
    } else if (Found) {
      return 0;
    } else if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
                                           ASTContext &Context) {
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bitfields wholesale; there are two problems, we
    // don't know how to expand them yet, and the predicate for
    // telling if a bitfield still counts as "basic" is more
    // complicated than what we were doing previously.
    if (FD->isBitField())
      return false;
  }

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  bool IsDarwin;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(ASTContext &Context, bool d)
    : ABIInfo(), Context(Context), IsDarwin(d) {}
};
}


/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, or complex type, it is ok.
  if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAsRecordType();
  if (!RT) return false;

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
         e = RT->getDecl()->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;

    // FIXME: Reject bitfields wholesale for now; this is incorrect.
    if (FD->isBitField())
      return false;

    // Empty structures are ignored.
    if (isEmptyRecord(Context, FD->getType()))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAsVectorType()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwin) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
                                                           2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Outside of Darwin, structs and unions are always indirect.
    if (!IsDarwin && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);
    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        // FIXME: This is gross, it would be nice if we could just
        // pass back SeltTy and have clients deal with it. Is it worth
        // supporting coerce to both LLVM and clang Types?
        if (BT->isIntegerType()) {
          uint64_t Size = Context.getTypeSize(SeltTy);
          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
        } else if (BT->getKind() == BuiltinType::Double) {
          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as
        // the proper pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0), Context);
      }
    }

    uint64_t Size = Context.getTypeSize(RetTy);
    if (isRegisterSize(Size)) {
      // Always return in register for unions for now.
      // FIXME: This is wrong, but better than treating as a
      // structure.
      if (RetTy->isUnionType())
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));

      // Small structures which are register sized are generally returned
      // in a register.
      if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}
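// Illustrative examples for the return classification above, assuming
// the Darwin x86-32 ABI:
//
//   struct P2 { short a, b; };   // 32 bits, register sized
//   struct C3 { char c[3]; };    // 24 bits, not register sized
//
// P2 is coerced to i32 (and so comes back in %eax), while C3 fails
// isRegisterSize() and is returned indirectly through a hidden sret
// pointer. A struct whose only non-empty member is a float instead
// takes the single-element path and is returned as an LLVM float.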
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // Ignore empty structs.
    uint64_t Size = Context.getTypeSize(Ty);
    if (Ty->isStructureType() && Size == 0)
      return ABIArgInfo::getIgnore();

    // Expand structs with size <= 128-bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Context.getTypeSize(Ty) <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, choose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an
  /// enumeration of all the ways we might want to pass things,
  /// instead of constructing an LLVM type. This makes this code more
  /// explicit, and it makes it clearer that we are also doing this
  /// for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
                                          Class Field) const {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  else if (Field == Memory)
    return Memory;
  else if (Accum == NoClass)
    return Field;
  else if (Accum == Integer || Field == Integer)
    return Integer;
  else if (Field == X87 || Field == X87Up || Field == ComplexX87)
    return Memory;
  else
    return SSE;
}

void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value
  // class for Class pairs with appropriate constructor methods for
  // the various situations.

  // FIXME: Some of the split computations are wrong; unaligned
  // vectors shouldn't be passed in registers for example, so there is
  // no chance they can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    // FIXME: __int128 is (Integer, Integer).
  } else if (const EnumType *ET = Ty->getAsEnumType()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAsRecordType()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(Context),
           e = RD->field_end(Context); i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bitfields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bitfields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size =
          i->getBitWidth()->getIntegerConstantExprValue(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
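// Worked example of the classification above (illustrative): for
//
//   struct S { double d; long l; };   // 16 bytes on x86-64
//
// the first eightbyte holds 'd' and classifies as SSE, the second holds
// 'l' and classifies as INTEGER, so classify() yields Lo = SSE and
// Hi = Integer. classifyArgumentType() below then coerces this to the
// LLVM type { double, i64 }, consuming one SSE and one general purpose
// register when the value is passed.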
ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
                                          const llvm::Type *CoerceTo,
                                          ASTContext &Context) const {
  if (CoerceTo == llvm::Type::Int64Ty) {
    // Integer and pointer types will end up in a general purpose
    // register.
    if (Ty->isIntegralType() || Ty->isPointerType())
      return ABIArgInfo::getDirect();

  } else if (CoerceTo == llvm::Type::DoubleTy) {
    // FIXME: It would probably be better to make CGFunctionInfo only
    // map using canonical types than to canonize here.
    QualType CTy = Context.getCanonicalType(Ty);

    // Float and double end up in a single SSE reg.
    if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
      return ABIArgInfo::getDirect();

  }

  return ABIArgInfo::getCoerce(CoerceTo);
}

ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return ABIArgInfo::getIndirect(0);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::Int64Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::DoubleTy; break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::X86_FP80Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
                                    llvm::Type::X86_FP80Ty,
                                    NULL);
    break;
  }

  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}
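// For instance (illustrative): returning '_Complex double' classifies as
// Lo = SSE, Hi = SSE, so the switches above build the return type
// { double, double }, i.e. the real part comes back in %xmm0 and the
// imaginary part in %xmm1. Returning 'long double' classifies as
// X87/X87Up and comes back on the x87 stack as a single x86_fp80 in %st0.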
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return ABIArgInfo::getIndirect(0);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::Int64Ty;
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::DoubleTy;
    break;
  }

  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    ++neededInt;
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;
  }

  return getCoerceResult(Ty, ResType, Context);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = ABIArgInfo::getIndirect(0);
    }
  }
}

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    llvm::Type::Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                               (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
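// Sketch of the va_list manipulated here, assuming the standard AMD64
// layout used by the code below:
//
//   gp_offset           0..48    byte offset into reg_save_area of the
//                                next unused GP register (6 regs * 8)
//   fp_offset          48..176   byte offset of the next unused SSE
//                                register (8 regs * 16, after the GP area)
//   overflow_arg_area            pointer to the next stack-passed argument
//   reg_save_area                spilled register arguments
//
// EmitVAArg below first tries the register save area and falls back to
// the overflow area handled above.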
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up
  // needing to collect arguments from different places; often what
  // should result in a simple assembling of a structure from
  // scattered addresses has many more loads than necessary. Can we
  // clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area, we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
      const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
                                                         llvm::Type::DoubleTy,
                                                         NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}

class ARMABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    it->info = classifyArgumentType(it->type, Context);
  }
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            ASTContext &Context) const {
  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getDirect();
  }
  // FIXME: This is kind of nasty... but there isn't much choice
  // because the ARM backend doesn't support byval.
  // FIXME: This doesn't handle alignment > 64 bits.
  const llvm::Type* ElemTy;
  unsigned SizeRegs;
  if (Context.getTypeAlign(Ty) > 32) {
    ElemTy = llvm::Type::Int64Ty;
    SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
  } else {
    ElemTy = llvm::Type::Int32Ty;
    SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
  }
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
  const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
  return ABIArgInfo::getCoerce(STy);
}

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Aggregates <= 4 bytes are returned in r0; other aggregates
    // are returned indirectly.
    uint64_t Size = Context.getTypeSize(RetTy);
    if (Size <= 32)
      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

const ABIInfo &CodeGenTypes::getABIInfo() const {
  if (TheABIInfo)
    return *TheABIInfo;

  // For now we just cache this in the CodeGenTypes and don't bother
  // to free it.
  const char *TargetPrefix = getContext().Target.getTargetPrefix();
  if (strcmp(TargetPrefix, "x86") == 0) {
    bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin");
    switch (getContext().Target.getPointerWidth(0)) {
    case 32:
      return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin));
    case 64:
      return *(TheABIInfo = new X86_64ABIInfo());
    }
  } else if (strcmp(TargetPrefix, "arm") == 0) {
    // FIXME: Support for OABI?
    return *(TheABIInfo = new ARMABIInfo());
  }

  return *(TheABIInfo = new DefaultABIInfo);
}

/***/

CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}
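// As an illustration of the Expand path used below: a parameter such as
//
//   struct Point { float x; float y; };
//
// that classifyArgumentType marked Expand is lowered to two separate
// 'float' IR arguments. GetExpandedTypes produces the flattened type
// list, ExpandTypeFromArgs stitches the incoming values back into a
// local temporary in the prologue, and ExpandTypeToArgs flattens a
// struct rvalue into a call's argument vector.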
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty);

  // If load is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    assert(SrcSize < DstSize && "Coercion is losing source bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
  } else {
    assert(SrcSize > DstSize && "Coercion is missing bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
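// For example (illustrative): a C function
//
//   struct Big f(struct Big x, int y);
//
// whose return and first argument were both classified Indirect is
// lowered to the LLVM signature
//
//   void @f(%struct.Big* %agg.result, %struct.Big* %x, i32 %y)
//
// with the sret and byval attributes attached by ConstructAttributeList
// below rather than being encoded in the function type itself.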

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Direct:
    if (RetTy->isPromotableIntegerType()) {
      if (RetTy->isSignedIntegerType()) {
        RetAttrs |= llvm::Attribute::SExt;
      } else if (RetTy->isUnsignedIntegerType()) {
        RetAttrs |= llvm::Attribute::ZExt;
      }
    }
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly.
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variables.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>())
      RegParm = RegParmAttr->getNumParams();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      Attributes |= llvm::Attribute::ByVal;
      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Direct:
      if (ParamType->isPromotableIntegerType()) {
        if (ParamType->isSignedIntegerType()) {
          Attributes |= llvm::Attribute::SExt;
        } else if (ParamType->isUnsignedIntegerType()) {
          Attributes |= llvm::Attribute::ZExt;
        }
      }
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need
      // to do anything here? The result should be just reconstructed
      // on the other side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
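
// For a hypothetical declaration "struct Big f(short x)" on a target that
// returns Big indirectly, the list built above would contain roughly:
//
//   index 1  -> StructRet | NoAlias   (the hidden return-slot pointer)
//   index 2  -> SExt                  (short is a promotable signed integer)
//   index ~0 -> accumulated function attributes (NoUnwind, NoReturn, ...)
//
// Index 0 is reserved for attributes on the return value itself.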

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up
  // and simplify.

  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to
      // drop the result in a new alloca anyway, so we could just
      // store into that directly if we broke the abstraction down
      // more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
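
// As a concrete example of the Expand case above: a hypothetical argument
// declared as "struct Point p" (two scalar fields) that the ABI expands into
// two separate parameters is reassembled into an alloca named "p.addr", and
// the incoming LLVM arguments are named "p.0" and "p.1".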

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false);
      }
      break;

    case ABIArgInfo::Direct:
      // The internal return value temp will always have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  return EmitAnyExprToTemp(E);
}

void CodeGenFunction::EmitCallArgs(CallArgList& Args,
                                   const FunctionProtoType *FPT,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd) {
  CallExpr::const_arg_iterator Arg = ArgBeg;

  // First, use the function argument types.
  if (FPT) {
    for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(),
           E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
      assert(getContext().getCanonicalType(I->getNonReferenceType()).
             getTypePtr() ==
             getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
             "type mismatch in call argument!");

      QualType ArgType = *I;
      Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
                                    ArgType));
    }

    assert((Arg == ArgEnd || FPT->isVariadic()) &&
           "Extra arguments in non-variadic function!");
  }

  // If we still have any arguments, emit them using the type of the argument.
  for (; Arg != ArgEnd; ++Arg) {
    QualType ArgType = Arg->getType();
    Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
                                  ArgType));
  }
}
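
// Example: for a call such as printf("%d", c) against the prototype
// "int printf(const char *, ...)", the format string is emitted with the
// prototype's parameter type in the loop over FPT above, while the trailing
// variadic argument falls through to the second loop and uses the type of
// the argument expression itself.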

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and
  // simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    // Create a temporary alloca to hold the result of the call. :(
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }
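
  // At this point Args holds the fully lowered argument list. For example, a
  // hypothetical call "Big r = f(3)" whose return is Indirect would have Args
  // begin with the temporary alloca for the Big result, followed by the i32
  // constant 3 passed Direct.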

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, &Args[0], &Args[0]+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              &Args[0], &Args[0]+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr
    // emitters in general are not ready to handle emitting
    // expressions at unreachable points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
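
// Note on the Coerce return path above: a call whose ABI return type differs
// from the source-level type (say, a small struct coerced to an integer) has
// its raw call result written into a "coerce" temporary and then reloaded
// with the original type, mirroring what CreateCoercedLoad does on the
// argument side.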

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}