CGCall.cpp revision f04d69bbb25eb681fff1a108f13f67c6ca70cf6a
//===--- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

static llvm::cl::opt<bool>
UseX86_64ABI("use-x86_64-abi",
             llvm::cl::desc("Enable use of experimental x86_64 ABI."),
             llvm::cl::init(false));

/***/

// FIXME: Use iterator and sidestep silly type array creation.

CGFunctionInfo::CGFunctionInfo(const FunctionTypeNoProto *FTNP)
  : IsVariadic(true) {
  ArgTypes.push_back(FTNP->getResultType());
}

CGFunctionInfo::CGFunctionInfo(const FunctionTypeProto *FTP)
  : IsVariadic(FTP->isVariadic()) {
  ArgTypes.push_back(FTP->getResultType());
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTypes.push_back(FTP->getArgType(i));
}

// FIXME: Is there really any reason to have this still?
CGFunctionInfo::CGFunctionInfo(const FunctionDecl *FD) {
  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  const FunctionTypeProto *FTP = dyn_cast<FunctionTypeProto>(FTy);

  ArgTypes.push_back(FTy->getResultType());
  if (FTP) {
    IsVariadic = FTP->isVariadic();
    for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
      ArgTypes.push_back(FTP->getArgType(i));
  } else {
    IsVariadic = true;
  }
}

CGFunctionInfo::CGFunctionInfo(const ObjCMethodDecl *MD,
                               const ASTContext &Context)
  : IsVariadic(MD->isVariadic()) {
  ArgTypes.push_back(MD->getResultType());
  ArgTypes.push_back(MD->getSelfDecl()->getType());
  ArgTypes.push_back(Context.getObjCSelType());
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTypes.push_back((*i)->getType());
}

ArgTypeIterator CGFunctionInfo::argtypes_begin() const {
  return ArgTypes.begin();
}

ArgTypeIterator CGFunctionInfo::argtypes_end() const {
  return ArgTypes.end();
}

/***/

CGCallInfo::CGCallInfo(QualType _ResultType, const CallArgList &_Args) {
  ArgTypes.push_back(_ResultType);
  for (CallArgList::const_iterator i = _Args.begin(), e = _Args.end(); i!=e; ++i)
    ArgTypes.push_back(i->second);
}

ArgTypeIterator CGCallInfo::argtypes_begin() const {
  return ArgTypes.begin();
}

ArgTypeIterator CGCallInfo::argtypes_end() const {
  return ArgTypes.end();
}
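
// For illustration, CGFunctionInfo and CGCallInfo both store the return
// type at ArgTypes[0], followed by the argument types in order. Given a
// hypothetical declaration:
//
//   float f(int a, char *b);
//
// ArgTypes holds {float, int, char*}, so argtypes_begin() points at the
// return type.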

/***/

/// ABIArgInfo - Helper class to encapsulate information about how a
/// specific C type should be passed to or returned from a function.
class ABIArgInfo {
public:
  enum Kind {
    Default,
    StructRet, /// Only valid for return values. The return value
               /// should be passed through a pointer to a caller
               /// allocated location passed as an implicit first
               /// argument to the function.

    Ignore,    /// Ignore the argument (treat as void). Useful for
               /// void and empty structs.

    Coerce,    /// Only valid for aggregate return types; the argument
               /// should be accessed by coercion to a provided type.

    ByVal,     /// Only valid for aggregate argument types. The
               /// structure should be passed "byval" with the
               /// specified alignment (0 indicates default
               /// alignment).

    Expand,    /// Only valid for aggregate argument types. The
               /// structure should be expanded into consecutive
               /// arguments for its constituent fields. Currently
               /// expand is only allowed on structures whose fields
               /// are all scalar types or are themselves expandable
               /// types.

    KindFirst=Default, KindLast=Expand
  };

private:
  Kind TheKind;
  const llvm::Type *TypeData;
  unsigned UIntData;

  ABIArgInfo(Kind K, const llvm::Type *TD=0,
             unsigned UI=0) : TheKind(K),
                              TypeData(TD),
                              UIntData(UI) {}
public:
  static ABIArgInfo getDefault() {
    return ABIArgInfo(Default);
  }
  static ABIArgInfo getStructRet() {
    return ABIArgInfo(StructRet);
  }
  static ABIArgInfo getIgnore() {
    return ABIArgInfo(Ignore);
  }
  static ABIArgInfo getCoerce(const llvm::Type *T) {
    return ABIArgInfo(Coerce, T);
  }
  static ABIArgInfo getByVal(unsigned Alignment) {
    return ABIArgInfo(ByVal, 0, Alignment);
  }
  static ABIArgInfo getExpand() {
    return ABIArgInfo(Expand);
  }

  Kind getKind() const { return TheKind; }
  bool isDefault() const { return TheKind == Default; }
  bool isStructRet() const { return TheKind == StructRet; }
  bool isIgnore() const { return TheKind == Ignore; }
  bool isCoerce() const { return TheKind == Coerce; }
  bool isByVal() const { return TheKind == ByVal; }
  bool isExpand() const { return TheKind == Expand; }

  // Coerce accessors
  const llvm::Type *getCoerceToType() const {
    assert(TheKind == Coerce && "Invalid kind!");
    return TypeData;
  }

  // ByVal accessors
  unsigned getByValAlignment() const {
    assert(TheKind == ByVal && "Invalid kind!");
    return UIntData;
  }
};

/***/

/* FIXME: All of this stuff should be part of the target interface
   somehow. It is currently here because it is not clear how to factor
   the targets to support this, since the Targets currently live in a
   layer below types n'stuff.
 */

/// ABIInfo - Target specific hooks for defining how a type should be
/// passed or returned from functions.
class clang::ABIInfo {
public:
  virtual ~ABIInfo();

  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const = 0;

  virtual ABIArgInfo classifyArgumentType(QualType Ty,
                                          ASTContext &Context) const = 0;
};

ABIInfo::~ABIInfo() {}
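
// For illustration, a hook that returns ABIArgInfo::getCoerce(Int64Ty)
// for a hypothetical two-float struct:
//
//   struct Pair { float a, b; };
//
// tells the code below to return that struct as a single i64 rather than
// through a hidden sret pointer.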

/// isEmptyStruct - Return true iff a structure has no non-empty
/// members. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyStruct(QualType T) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    if (!isEmptyStruct(FD->getType()))
      return false;
  }
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const FieldDecl *isSingleElementStruct(QualType T) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const FieldDecl *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    if (isEmptyStruct(FT)) {
      // Ignore
    } else if (Found) {
      return 0;
    } else if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FD;
    } else {
      Found = isSingleElementStruct(FT);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
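
// For illustration, given the hypothetical declarations:
//
//   struct Empty {};
//   struct S1 { struct Empty e; double d; };
//
// isEmptyStruct(Empty) is true, and isSingleElementStruct(S1) returns the
// FieldDecl for 'd', since the empty field is ignored.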

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
                                           ASTContext &Context) {
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // If this is a bit-field we need to make sure it is still a
    // 32-bit or 64-bit type.
    if (Expr *BW = FD->getBitWidth()) {
      unsigned Width = BW->getIntegerConstantExprValue(Context).getZExtValue();
      if (Width <= 16)
        return false;
    }
  }
  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// sensible LLVM IR generation, but does not conform to any
/// particular ABI.
class DefaultABIInfo : public ABIInfo {
  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const;

  virtual ABIArgInfo classifyArgumentType(QualType RetTy,
                                          ASTContext &Context) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
public:
  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const;

  virtual ABIArgInfo classifyArgumentType(QualType RetTy,
                                          ASTContext &Context) const;
};
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Classify "single element" structs as their element type.
    const FieldDecl *SeltFD = isSingleElementStruct(RetTy);
    if (SeltFD) {
      QualType SeltTy = SeltFD->getType()->getDesugaredType();
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        // FIXME: This is gross, it would be nice if we could just
        // pass back SeltTy and have clients deal with it. Is it worth
        // supporting coerce to both LLVM and clang Types?
        if (BT->isIntegerType()) {
          uint64_t Size = Context.getTypeSize(SeltTy);
          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
        } else if (BT->getKind() == BuiltinType::Double) {
          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as
        // the proper pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
        return ABIArgInfo::getCoerce(PtrTy);
      }
    }

    uint64_t Size = Context.getTypeSize(RetTy);
    if (Size == 8) {
      return ABIArgInfo::getCoerce(llvm::Type::Int8Ty);
    } else if (Size == 16) {
      return ABIArgInfo::getCoerce(llvm::Type::Int16Ty);
    } else if (Size == 32) {
      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
    } else if (Size == 64) {
      return ABIArgInfo::getCoerce(llvm::Type::Int64Ty);
    } else {
      return ABIArgInfo::getStructRet();
    }
  } else {
    return ABIArgInfo::getDefault();
  }
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always byval.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getByVal(0);

    // Expand empty structs (i.e. ignore)
    uint64_t Size = Context.getTypeSize(Ty);
    if (Ty->isStructureType() && Size == 0)
      return ABIArgInfo::getExpand();

    // Expand structs with size <= 128-bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Context.getTypeSize(Ty) <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    return ABIArgInfo::getByVal(0);
  } else {
    return ABIArgInfo::getDefault();
  }
}
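
// For illustration, on X86-32 these rules classify some hypothetical
// types as follows:
//
//   struct S8  { char c; };    // return coerced to i8
//   struct S64 { int a, b; };  // return coerced to i64
//   struct Big { int a[4]; };  // returned via StructRet, passed byval
//
// A struct of only 32/64-bit int, float, and pointer fields totalling at
// most 128 bits is instead expanded into its fields when passed.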

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the low word of the type.
  /// \param Hi - The classification for the high word of the type.
  /// \param OffsetBase - The byte position of the type in the root
  /// structure. Some parameters are classified differently depending on
  /// whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// be NoClass.
  void classify(QualType T, ASTContext &Context, unsigned OffsetBase,
                Class &Lo, Class &Hi) const;

public:
  virtual ABIArgInfo classifyReturnType(QualType RetTy,
                                        ASTContext &Context) const;

  virtual ABIArgInfo classifyArgumentType(QualType RetTy,
                                          ASTContext &Context) const;
};
}

void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             unsigned OffsetBase,
                             Class &Lo, Class &Hi) const {
  Lo = Memory;
  Hi = NoClass;
  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Lo = NoClass;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Lo = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Lo = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }

    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    // FIXME: __int128 is (Integer, Integer).
  } else if (Ty->isPointerLikeType() || Ty->isBlockPointerType() ||
             Ty->isObjCQualifiedInterfaceType()) {
    Lo = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    unsigned Size = Context.getTypeSize(VT);
    if (Size == 64) {
      // FIXME: For some reason, gcc appears to be treating <1 x
      // double> as INTEGER; this seems wrong, but we will match for
      // now (icc rejects <1 x double>, so...).
      Lo = (VT->getElementType() == Context.DoubleTy) ? Integer : SSE;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = CT->getElementType();

    if (ET->isIntegerType()) {
      unsigned Size = Context.getTypeSize(Ty);
      if (Size <= 64)
        Lo = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Lo = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Lo = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    unsigned EB_Real = (OffsetBase) >> 3;
    unsigned EB_Imag = (OffsetBase + Context.getTypeSize(ET)) >> 3;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const RecordType *RT = Ty->getAsRecordType()) {
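
    // For illustration, for a hypothetical
    //
    //   struct P { double d; long l; };
    //
    // the code below classifies the first eightbyte (d) as SSE and the
    // second (l) as Integer, so P occupies one SSE and one INTEGER
    // eightbyte.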
    unsigned Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Lo = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
           e = RD->field_end(); i != e; ++i, ++idx) {
      unsigned Offset = OffsetBase + Layout.getFieldOffset(idx);

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      if (Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Determine which half of the structure we are classifying.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class &Target = Offset < 64 ? Lo : Hi;

      // Classify this field.
      Class FieldLo, FieldHi;
      classify(i->getType(), Context, Offset, FieldLo, FieldHi);

      // Merge the lo field classification.
      //
      // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
      // classified recursively so that always two fields are
      // considered. The resulting class is calculated according to
      // the classes of the fields in the eightbyte:
      //
      // (a) If both classes are equal, this is the resulting class.
      //
      // (b) If one of the classes is NO_CLASS, the resulting class is
      // the other class.
      //
      // (c) If one of the classes is MEMORY, the result is the MEMORY
      // class.
      //
      // (d) If one of the classes is INTEGER, the result is the
      // INTEGER.
      //
      // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
      // MEMORY is used as class.
      //
      // (f) Otherwise class SSE is used.
      if (Target == FieldLo || FieldLo == NoClass) ;
      else if (FieldLo == Memory) {
        // Memory is never over-ridden, just bail.
        Lo = Memory;
        return;
      }
      else if (Target == NoClass)
        Target = FieldLo;
      else if (Target == Integer || FieldLo == Integer)
        Target = Integer;
      else if (FieldLo == X87 || FieldLo == X87Up || FieldLo == ComplexX87) {
        // As before, just bail once we generate a memory class.
        Lo = Memory;
        return;
      } else
        Target = SSE;

      // It isn't clear from the ABI spec what the role of the high
      // classification is here, but since this should only happen
      // when we have a struct with a two eightbyte member, we can
      // just push the field high class into the overall high class.
      if (FieldHi != NoClass)
        Hi = FieldHi;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail). I don't believe the second is actually
    // possible at all.
    assert(Lo != Memory && "Unexpected memory classification.");
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
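
// For illustration, classify() computes (Lo, Hi) pairs such as:
//
//   int                    -> (Integer, NoClass)
//   long double            -> (X87, X87Up)
//   _Complex double        -> (SSE, SSE)
//   struct { long a, b; }  -> (Integer, Integer)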

ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument, i.e. structret.
  case Memory:
    return ABIArgInfo::getStructRet();

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::Int64Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::DoubleTy; break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::X86_FP80Ty; break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == NoClass && "Unexpected ComplexX87 classification.");
    ResType = llvm::VectorType::get(llvm::Type::X86_FP80Ty, 2);
    break;
  }

  switch (Hi) {
    // Memory was handled previously, and ComplexX87 and X87 should
    // never occur as hi classes.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
    //
    // X87UP should always be preceded by X87, so we don't need to do
    // anything here.
  case X87Up:
    assert(Lo == X87 && "Unexpected X87Up classification.");
    break;
  }

  return ABIArgInfo::getCoerce(ResType);
}

ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context) const {
  return ABIArgInfo::getDefault();
}

const ABIInfo &CodeGenTypes::getABIInfo() const {
  if (TheABIInfo)
    return *TheABIInfo;

  // For now we just cache this in the CodeGenTypes and don't bother
  // to free it.
  const char *TargetPrefix = getContext().Target.getTargetPrefix();
  if (strcmp(TargetPrefix, "x86") == 0) {
    switch (getContext().Target.getPointerWidth(0)) {
    case 32:
      return *(TheABIInfo = new X86_32ABIInfo());
    case 64:
      if (UseX86_64ABI)
        return *(TheABIInfo = new X86_64ABIInfo());
    }
  }

  return *(TheABIInfo = new DefaultABIInfo);
}

// getABIReturnInfo - Wrap the ABIInfo classifyReturnType hook, altering
// "default" types to StructRet when appropriate for simplicity.
static ABIArgInfo getABIReturnInfo(QualType Ty, CodeGenTypes &CGT) {
  assert(!Ty->isArrayType() &&
         "Array types cannot be passed directly.");
  ABIArgInfo Info = CGT.getABIInfo().classifyReturnType(Ty, CGT.getContext());
  // Ensure default on aggregate types is StructRet.
  if (Info.isDefault() && CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getStructRet();
  return Info;
}
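
// For illustration, with DefaultABIInfo an aggregate return comes back as
// Default, so getABIReturnInfo rewrites it to StructRet; the function is
// then emitted returning void with a hidden first parameter (named
// "agg.result" in the prolog below) carrying the result.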

// getABIArgumentInfo - Wrap the ABIInfo classifyArgumentType hook, altering
// "default" types to ByVal when appropriate for simplicity.
static ABIArgInfo getABIArgumentInfo(QualType Ty, CodeGenTypes &CGT) {
  assert(!Ty->isArrayType() &&
         "Array types cannot be passed directly.");
  ABIArgInfo Info = CGT.getABIInfo().classifyArgumentType(Ty, CGT.getContext());
  // Ensure default on aggregate types is ByVal.
  if (Info.isDefault() && CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getByVal(0);
  return Info;
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}
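
// For illustration, expanding the hypothetical
//
//   struct Inner { float x, y; };
//   struct Outer { struct Inner in; int i; };
//
// flattens recursively to the parameter list (float, float, i32);
// ExpandTypeFromArgs then reassembles those arguments into a temporary
// Outer inside the callee.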

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/***/

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGCallInfo &CI, bool IsVariadic) {
  return GetFunctionType(CI.argtypes_begin(), CI.argtypes_end(), IsVariadic);
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
  return GetFunctionType(FI.argtypes_begin(), FI.argtypes_end(), FI.isVariadic());
}
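
// For illustration, with the X86-32 classifications above, a hypothetical
// "struct Big g(int)" (Big being 16 bytes) maps to the LLVM function type
//
//   void (%struct.Big*, i32)
//
// while a function returning the two-int S64 above maps to "i64 ()".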

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(ArgTypeIterator begin, ArgTypeIterator end,
                              bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = *begin;
  ABIArgInfo RetAI = getABIReturnInfo(RetTy, *this);
  switch (RetAI.getKind()) {
  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Default:
    if (RetTy->isVoidType()) {
      ResultType = llvm::Type::VoidTy;
    } else {
      ResultType = ConvertType(RetTy);
    }
    break;

  case ABIArgInfo::StructRet: {
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (++begin; begin != end; ++begin) {
    ABIArgInfo AI = getABIArgumentInfo(*begin, *this);
    const llvm::Type *Ty = ConvertType(*begin);

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
    case ABIArgInfo::StructRet:
      assert(0 && "Invalid ABI kind for non-return argument");

    case ABIArgInfo::ByVal:
      // byval arguments are always on the stack, which is addr space #0.
      ArgTys.push_back(llvm::PointerType::getUnqual(Ty));
      assert(AI.getByValAlignment() == 0 && "FIXME: alignment unhandled");
      break;

    case ABIArgInfo::Default:
      ArgTys.push_back(Ty);
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(*begin, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

bool CodeGenModule::ReturnTypeUsesSret(QualType RetTy) {
  return getABIReturnInfo(RetTy, getTypes()).isStructRet();
}
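
// For illustration, in the attribute list built below index 0 holds
// return-value attributes, indices 1..N map to the LLVM formal parameters
// (an sret pointer occupies index 1 and shifts user parameters right),
// and index ~0 holds function-level attributes such as nounwind.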

void CodeGenModule::ConstructAttributeList(const Decl *TargetDecl,
                                           ArgTypeIterator begin,
                                           ArgTypeIterator end,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  if (TargetDecl) {
    if (TargetDecl->getAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->getAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->getAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->getAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
  }

  QualType RetTy = *begin;
  unsigned Index = 1;
  ABIArgInfo RetAI = getABIReturnInfo(RetTy, getTypes());
  switch (RetAI.getKind()) {
  case ABIArgInfo::Default:
    if (RetTy->isPromotableIntegerType()) {
      if (RetTy->isSignedIntegerType()) {
        RetAttrs |= llvm::Attribute::SExt;
      } else if (RetTy->isUnsignedIntegerType()) {
        RetAttrs |= llvm::Attribute::ZExt;
      }
    }
    break;

  case ABIArgInfo::StructRet:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet|
                                                llvm::Attribute::NoAlias));
    ++Index;
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
  for (++begin; begin != end; ++begin) {
    QualType ParamType = *begin;
    unsigned Attributes = 0;
    ABIArgInfo AI = getABIArgumentInfo(ParamType, getTypes());

    switch (AI.getKind()) {
    case ABIArgInfo::StructRet:
    case ABIArgInfo::Coerce:
      assert(0 && "Invalid ABI kind for non-return argument");

    case ABIArgInfo::ByVal:
      Attributes |= llvm::Attribute::ByVal;
      assert(AI.getByValAlignment() == 0 && "FIXME: alignment unhandled");
      break;

    case ABIArgInfo::Default:
      if (ParamType->isPromotableIntegerType()) {
        if (ParamType->isSignedIntegerType()) {
          Attributes |= llvm::Attribute::SExt;
        } else if (ParamType->isUnsignedIntegerType()) {
          Attributes |= llvm::Attribute::ZExt;
        }
      }
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need
      // to do anything here? The result should be just reconstructed
      // on the other side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }

  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

void CodeGenFunction::EmitFunctionProlog(llvm::Function *Fn,
                                         QualType RetTy,
                                         const FunctionArgList &Args) {
  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(RetTy)) {
    AI->setName("agg.result");
    ++AI;
  }

  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *Arg = i->first;
    QualType Ty = i->second;
    ABIArgInfo ArgI = getABIArgumentInfo(Ty, CGM.getTypes());

    switch (ArgI.getKind()) {
    case ABIArgInfo::ByVal:
    case ABIArgInfo::Default: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
        // This must be a promotion, for something like
        // "void a(x) short x; {..."
        V = EmitScalarConversion(V, Ty, Arg->getType());
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertType(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
    case ABIArgInfo::StructRet:
      assert(0 && "Invalid ABI kind for non-return argument");
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
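
// For illustration, CreateCoercedLoad applied to an alloca of
// %struct.Pair = { float, float } with a destination type of i64 emits,
// since the sizes match, just:
//
//   %0 = bitcast %struct.Pair* %tmp to i64*
//   %1 = load i64* %0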

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  unsigned SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  unsigned DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty);

  // If the load is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Casted);
  } else {
    assert(SrcSize < DstSize && "Coercion is losing source bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    return CGF.Builder.CreateLoad(Tmp);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  unsigned SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  unsigned DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy);

  // If the store is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    CGF.Builder.CreateStore(Src, Casted);
  } else {
    assert(SrcSize > DstSize && "Coercion is missing bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(Casted), DstPtr);
  }
}

void CodeGenFunction::EmitFunctionEpilog(QualType RetTy,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    ABIArgInfo RetAI = getABIReturnInfo(RetTy, CGM.getTypes());

    switch (RetAI.getKind()) {
    case ABIArgInfo::StructRet:
      if (RetTy->isAnyComplexType()) {
        // FIXME: Volatile
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        Builder.CreateStore(Builder.CreateLoad(ReturnValue),
                            CurFn->arg_begin());
      }
      break;

    case ABIArgInfo::Default:
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;
    }

    case ABIArgInfo::ByVal:
    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}
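
// For illustration, for a StructRet return EmitCall below allocates the
// destination itself and passes it as a hidden first argument, so a call
// to the hypothetical "struct Big g(int)" is emitted roughly as:
//
//   %tmp = alloca %struct.Big
//   call void @g(%struct.Big* %tmp, i32 %x)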

RValue CodeGenFunction::EmitCall(llvm::Value *Callee,
                                 QualType RetTy,
                                 const CallArgList &CallArgs) {
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  ABIArgInfo RetAI = getABIReturnInfo(RetTy, CGM.getTypes());
  switch (RetAI.getKind()) {
  case ABIArgInfo::StructRet:
    // Create a temporary alloca to hold the result of the call. :(
    Args.push_back(CreateTempAlloca(ConvertType(RetTy)));
    break;

  case ABIArgInfo::Default:
  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I) {
    ABIArgInfo ArgInfo = getABIArgumentInfo(I->second, CGM.getTypes());
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::ByVal: // Default is byval
    case ABIArgInfo::Default:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertType(I->second)));
        StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::StructRet:
    case ABIArgInfo::Coerce:
      assert(0 && "Invalid ABI kind for non-return argument");
      break;

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  llvm::CallInst *CI = Builder.CreateCall(Callee,&Args[0],&Args[0]+Args.size());
  CGCallInfo CallInfo(RetTy, CallArgs);

  // FIXME: Provide TargetDecl so nounwind, noreturn, etc, etc get set.
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(0,
                             CallInfo.argtypes_begin(), CallInfo.argtypes_end(),
                             AttributeList);
  CI->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
                                           AttributeList.size()));

  if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
    CI->setCallingConv(F->getCallingConv());
  if (CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::StructRet:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    else
      return RValue::get(Builder.CreateLoad(Args[0]));

  case ABIArgInfo::Default:
    return RValue::get(RetTy->isVoidType() ? 0 : CI);

  case ABIArgInfo::Ignore:
    if (RetTy->isVoidType())
      return RValue::get(0);
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *Res =
        llvm::UndefValue::get(llvm::PointerType::getUnqual(ConvertType(RetTy)));
      return RValue::getAggregate(Res);
    }
    return RValue::get(llvm::UndefValue::get(ConvertType(RetTy)));

  case ABIArgInfo::Coerce: {
    llvm::Value *V = CreateTempAlloca(ConvertType(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    else if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    else
      return RValue::get(Builder.CreateLoad(V));
  }

  case ABIArgInfo::ByVal:
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}