PPCISelLowering.h revision 349c2787cf9e174c8aa955bf8e3b09a405b2aece
//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
#define LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H

#include "PPC.h"
#include "PPCSubtarget.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {
  namespace PPCISD {
    // PPC-specific SelectionDAG node opcodes, used by the PPC backend's
    // custom lowering and instruction selection.
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FSEL - Traditional three-operand fsel node.
      ///
      FSEL,

      /// FCFID - The FCFID instruction, taking an f64 operand and producing
      /// an f64 value containing the FP representation of the integer that
      /// was temporarily in the f64 operand.
      FCFID,

      /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
      /// operand, producing an f64 value containing the integer representation
      /// of that FP value.
      FCTIDZ, FCTIWZ,

      /// STFIWX - The STFIWX instruction. The first operand is an input token
      /// chain, then an f64 value to store, then an address to store it to.
      STFIWX,

      // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
      // three v4f32 operands and producing a v4f32 result.
      VMADDFP, VNMSUBFP,

      /// VPERM - The PPC VPERM Instruction.
      ///
      VPERM,

      /// Hi/Lo - These represent the high and low 16-bit parts of a global
      /// address respectively. These nodes have two operands, the first of
      /// which must be a TargetGlobalAddress, and the second of which must be a
      /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
      /// though these are usually folded into other nodes.
      Hi, Lo,

      TOC_ENTRY,

      /// The following three target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.

      /// Restore the TOC from the TOC save area of the current stack frame.
      /// This is basically a hard coded load instruction which additionally
      /// takes/produces a flag.
      TOC_RESTORE,

      /// Like a regular LOAD but additionally taking/producing a flag.
      LOAD,

      /// LOAD into r2 (also taking/producing a flag). Like TOC_RESTORE, this is
      /// a hard coded load instruction.
      LOAD_TOC,

      /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an allocation on the stack.
      DYNALLOC,

      /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
      /// shift amounts. These nodes are generated by the multi-precision shift
      /// code.
      SRL, SRA, SHL,

      /// EXTSW_32 - This is the EXTSW instruction for use with "32-bit"
      /// registers.
      EXTSW_32,

      /// CALL - A direct function call.
      /// CALL_NOP_SVR4 is a call with the special NOP which follows 64-bit
      /// SVR4 calls.
      CALL_Darwin, CALL_SVR4, CALL_NOP_SVR4,

      /// NOP - Special NOP which follows 64-bit SVR4 calls.
      NOP,

      /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
      /// MTCTR instruction.
      MTCTR,

      /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
      /// BCTRL instruction.
      BCTRL_Darwin, BCTRL_SVR4,

      /// Return with a flag operand, matched by 'blr'
      RET_FLAG,

      /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF
      /// instructions. This copies the bits corresponding to the specified
      /// CRREG into the resultant GPR. Bits corresponding to other CR regs
      /// are undefined.
      MFCR,

      /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
      /// instructions. For lack of better number, we use the opcode number
      /// encoding for the OPC field to identify the compare. For example, 838
      /// is VCMPGTSH.
      VCMP,

      /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
      /// altivec VCMP*o instructions. For lack of better number, we use the
      /// opcode number encoding for the OPC field to identify the compare. For
      /// example, 838 is VCMPGTSH.
      VCMPo,

      /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
      /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
      /// condition register to branch on, OPC is the branch opcode to use (e.g.
      /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
      /// an optional input flag argument.
      COND_BRANCH,

      // The following 5 instructions are used only as part of the
      // long double-to-int conversion sequence.

      /// OUTFLAG = MFFS F8RC - This moves the FPSCR (not modelled) into the
      /// register.
      MFFS,

      /// OUTFLAG = MTFSB0 INFLAG - This clears a bit in the FPSCR.
      MTFSB0,

      /// OUTFLAG = MTFSB1 INFLAG - This sets a bit in the FPSCR.
      MTFSB1,

      /// F8RC, OUTFLAG = FADDRTZ F8RC, F8RC, INFLAG - This is an FADD done with
      /// rounding towards zero. It has flags added so it won't move past the
      /// FPSCR-setting instructions.
      FADDRTZ,

      /// MTFSF = F8RC, INFLAG - This moves the register into the FPSCR.
      MTFSF,

      /// LARX = This corresponds to PPC l{w|d}arx instruction: load and
      /// reserve indexed. This is used to implement atomic operations.
      LARX,

      /// STCX = This corresponds to PPC stcx. instruction: store conditional
      /// indexed. This is used to implement atomic operations.
      STCX,

      /// TC_RETURN - A tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
      CR6SET,
      CR6UNSET,

      /// G8RC = LD_GOT_TPREL Symbol, G8RReg - Used by the initial-exec
      /// TLS model, produces a LD instruction with base register G8RReg
      /// and offset sym@got@tprel. The latter identifies the GOT entry
      /// containing the offset of "sym" relative to the thread pointer.
      LD_GOT_TPREL,

      /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
      /// model, produces an ADD instruction that adds the contents of
      /// G8RReg to the thread pointer. Symbol contains a relocation
      /// sym@tls which is to be replaced by the thread pointer and
      /// identifies to the linker that the instruction is part of a
      /// TLS sequence.
      ADD_TLS,

      /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym@got@tlsgd@ha.
      ADDIS_TLSGD_HA,

      /// G8RC = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym@got@tlsgd@l.
      ADDI_TLSGD_L,

      /// G8RC = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym@tlsgd).
      GET_TLS_ADDR,

      /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym@got@tlsld@ha.
      ADDIS_TLSLD_HA,

      /// G8RC = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym@got@tlsld@l.
      ADDI_TLSLD_L,

      /// G8RC = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym@tlsld).
      GET_TLSLD_ADDR,

      /// G8RC = ADDIS_DTPREL_HA %X3, Symbol, Chain - For the
      /// local-dynamic TLS model, produces an ADDIS8 instruction
      /// that adds X3 to sym@dtprel@ha. The Chain operand is needed
      /// to tie this in place following a copy to %X3 from the result
      /// of a GET_TLSLD_ADDR.
      ADDIS_DTPREL_HA,

      /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym@got@dtprel@l.
      ADDI_DTPREL_L,

      /// STD_32 - This is the STD instruction for use with "32-bit" registers.
      /// NOTE: opcodes from here on are memory opcodes (they carry a
      /// MachineMemOperand), hence the jump to FIRST_TARGET_MEMORY_OPCODE.
      STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,

      /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
      /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
      /// the GPRC input, then stores it through Ptr. Type can be either i16 or
      /// i32.
      STBRX,

      /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
      /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
      /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
      /// or i32.
      LBRX,

      /// G8RC = ADDIS_TOC_HA %X2, Symbol - For medium code model, produces
      /// an ADDIS8 instruction that adds the TOC base register to sym@toc@ha.
      ADDIS_TOC_HA,

      /// G8RC = LD_TOC_L Symbol, G8RReg - For medium code model, produces a
      /// LD instruction with base register G8RReg and offset sym@toc@l.
      /// Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
      LD_TOC_L,

      /// G8RC = ADDI_TOC_L G8RReg, Symbol - For medium code model, produces
      /// an ADDI8 instruction that adds G8RReg to sym@toc@l.
      /// Preceded by an ADDIS_TOC_HA to form a full 32-bit offset.
      ADDI_TOC_L
    };
  }

  /// Define some predicates that are used for node matching.
  namespace PPC {
    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            bool isUnary);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            bool isUnary);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
    /// amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, bool isUnary);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element that is suitable for input to
    /// VSPLTB/VSPLTH/VSPLTW.
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

    /// isAllNegativeZeroVector - Returns true if all elements of build_vector
    /// are -0.0.
    bool isAllNegativeZeroVector(SDNode *N);

    /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
    /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
    unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted. The ByteSize field indicates
    /// the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
  }

  // PPCTargetLowering - PPC implementation of the TargetLowering interface:
  // custom DAG lowering hooks, addressing-mode selection, calling-convention
  // lowering, and inline-asm constraint handling for the PowerPC target.
  class PPCTargetLowering : public TargetLowering {
    const PPCSubtarget &PPCSubTarget;

  public:
    explicit PPCTargetLowering(PPCTargetMachine &TM);

    /// getTargetNodeName() - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }

    /// getSetCCResultType - Return the ISD::SETCC ValueType
    virtual EVT getSetCCResultType(EVT VT) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be represented as an indexed [r+r] operation. Returns false if it
    /// can be more efficiently represented with [r+imm].
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegImm - Returns true if the address N can be represented
    /// by a base register plus a signed 16-bit displacement [r+imm], and if it
    /// is not better represented as reg+reg.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    /// SelectAddressRegImmShift - Returns true if the address N can be
    /// represented by a base register plus a signed 14-bit displacement
    /// [r+imm*4]. Suitable for use by STD and friends.
    bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
                                  SelectionDAG &DAG) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;
    // Expands pseudo atomic read-modify-write instructions into
    // larx/stcx. loops (see LARX/STCX above).
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *MBB, bool is64Bit,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit, unsigned Opcode) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. This is the actual
    /// alignment, not its logarithm.
    unsigned getByValTypeAlignment(Type *Ty) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;

    /// isLegalAddressImmediate - Return true if the integer value can be used
    /// as the offset of the target addressing mode for load / store of the
    /// given type.
    virtual bool isLegalAddressImmediate(int64_t V, Type *Ty) const;

    /// isLegalAddressImmediate - Return true if the GlobalValue can be used as
    /// the offset of the target addressing mode.
    virtual bool isLegalAddressImmediate(GlobalValue *GV) const;

    virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly if SrcAlign is zero it
    /// means there isn't a need to check it against alignment requirement,
    /// probably because the source does not need to be loaded. If 'IsMemset' is
    /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
    /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
    /// source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                        MachineFunction &MF) const;

    /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
    /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
    /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
    /// is expanded to mul + add.
    virtual bool isFMAFasterThanMulAndAdd(EVT VT) const;

  private:
    SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;

    // Decide whether a call can be lowered as a tail call (same calling
    // convention, compatible argument passing, etc.).
    bool
    IsEligibleForTailCallOptimization(SDValue Callee,
                                      CallingConv::ID CalleeCC,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SelectionDAG& DAG) const;

    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
                                         int SPDiff,
                                         SDValue Chain,
                                         SDValue &LROpOut,
                                         SDValue &FPOpOut,
                                         bool isDarwinABI,
                                         DebugLoc dl) const;

    // Custom lowering hooks for the individual ISD opcodes dispatched
    // from LowerOperation.
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                         const PPCSubtarget &Subtarget) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG,
                       const PPCSubtarget &Subtarget) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                              const PPCSubtarget &Subtarget) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                    const PPCSubtarget &Subtarget) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;

    // Helpers shared by the per-ABI call-lowering implementations below.
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool isTailCall,
                       bool isVarArg,
                       SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8>
                         &RegsToPass,
                       SDValue InFlag, SDValue Chain,
                       SDValue &Callee,
                       int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual bool
      CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                     bool isVarArg,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     LLVMContext &Context) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    SDValue
      extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG,
                        SDValue ArgVal, DebugLoc dl) const;

    void
      setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG,
                         unsigned nAltivecParamsAtEnd,
                         unsigned MinReservedArea, bool isPPC64) const;

    // Per-ABI implementations of formal-argument lowering.
    SDValue
      LowerFormalArguments_Darwin(SDValue Chain,
                                  CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::InputArg> &Ins,
                                  DebugLoc dl, SelectionDAG &DAG,
                                  SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerFormalArguments_64SVR4(SDValue Chain,
                                  CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::InputArg> &Ins,
                                  DebugLoc dl, SelectionDAG &DAG,
                                  SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerFormalArguments_32SVR4(SDValue Chain,
                                  CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::InputArg> &Ins,
                                  DebugLoc dl, SelectionDAG &DAG,
                                  SmallVectorImpl<SDValue> &InVals) const;

    SDValue
      createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                 SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
                                 SelectionDAG &DAG, DebugLoc dl) const;

    // Per-ABI implementations of call lowering.
    SDValue
      LowerCall_Darwin(SDValue Chain, SDValue Callee,
                       CallingConv::ID CallConv,
                       bool isVarArg, bool isTailCall,
                       const SmallVectorImpl<ISD::OutputArg> &Outs,
                       const SmallVectorImpl<SDValue> &OutVals,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       DebugLoc dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                       CallingConv::ID CallConv,
                       bool isVarArg, bool isTailCall,
                       const SmallVectorImpl<ISD::OutputArg> &Outs,
                       const SmallVectorImpl<SDValue> &OutVals,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       DebugLoc dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
                       bool isVarArg, bool isTailCall,
                       const SmallVectorImpl<ISD::OutputArg> &Outs,
                       const SmallVectorImpl<SDValue> &OutVals,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       DebugLoc dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const;
  };
}

#endif   // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H