// PPCISelLowering.h -- LLVM revision 86765fbe170198e7bb40fd8499d1354f4c786f60
1//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that PPC uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#ifndef LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H 16#define LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H 17 18#include "PPC.h" 19#include "PPCRegisterInfo.h" 20#include "PPCSubtarget.h" 21#include "llvm/CodeGen/SelectionDAG.h" 22#include "llvm/Target/TargetLowering.h" 23 24namespace llvm { 25 namespace PPCISD { 26 enum NodeType { 27 // Start the numbering where the builtin ops and target ops leave off. 28 FIRST_NUMBER = ISD::BUILTIN_OP_END, 29 30 /// FSEL - Traditional three-operand fsel node. 31 /// 32 FSEL, 33 34 /// FCFID - The FCFID instruction, taking an f64 operand and producing 35 /// and f64 value containing the FP representation of the integer that 36 /// was temporarily in the f64 operand. 37 FCFID, 38 39 /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 40 /// operand, producing an f64 value containing the integer representation 41 /// of that FP value. 42 FCTIDZ, FCTIWZ, 43 44 /// STFIWX - The STFIWX instruction. The first operand is an input token 45 /// chain, then an f64 value to store, then an address to store it to. 46 STFIWX, 47 48 // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking 49 // three v4f32 operands and producing a v4f32 result. 50 VMADDFP, VNMSUBFP, 51 52 /// VPERM - The PPC VPERM Instruction. 53 /// 54 VPERM, 55 56 /// Hi/Lo - These represent the high and low 16-bit parts of a global 57 /// address respectively. 
These nodes have two operands, the first of 58 /// which must be a TargetGlobalAddress, and the second of which must be a 59 /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C', 60 /// though these are usually folded into other nodes. 61 Hi, Lo, 62 63 TOC_ENTRY, 64 65 /// The following three target-specific nodes are used for calls through 66 /// function pointers in the 64-bit SVR4 ABI. 67 68 /// Restore the TOC from the TOC save area of the current stack frame. 69 /// This is basically a hard coded load instruction which additionally 70 /// takes/produces a flag. 71 TOC_RESTORE, 72 73 /// Like a regular LOAD but additionally taking/producing a flag. 74 LOAD, 75 76 /// LOAD into r2 (also taking/producing a flag). Like TOC_RESTORE, this is 77 /// a hard coded load instruction. 78 LOAD_TOC, 79 80 /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX) 81 /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to 82 /// compute an allocation on the stack. 83 DYNALLOC, 84 85 /// GlobalBaseReg - On Darwin, this node represents the result of the mflr 86 /// at function entry, used for PIC code. 87 GlobalBaseReg, 88 89 /// These nodes represent the 32-bit PPC shifts that operate on 6-bit 90 /// shift amounts. These nodes are generated by the multi-precision shift 91 /// code. 92 SRL, SRA, SHL, 93 94 /// EXTSW_32 - This is the EXTSW instruction for use with "32-bit" 95 /// registers. 96 EXTSW_32, 97 98 /// CALL - A direct function call. 99 /// CALL_NOP is a call with the special NOP which follows 64-bit 100 /// SVR4 calls. 101 CALL, CALL_NOP, 102 103 /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a 104 /// MTCTR instruction. 105 MTCTR, 106 107 /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a 108 /// BCTRL instruction. 109 BCTRL, 110 111 /// Return with a flag operand, matched by 'blr' 112 RET_FLAG, 113 114 /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF 115 /// instructions. 
This copies the bits corresponding to the specified 116 /// CRREG into the resultant GPR. Bits corresponding to other CR regs 117 /// are undefined. 118 MFCR, 119 120 // EH_SJLJ_SETJMP - SjLj exception handling setjmp. 121 EH_SJLJ_SETJMP, 122 123 // EH_SJLJ_LONGJMP - SjLj exception handling longjmp. 124 EH_SJLJ_LONGJMP, 125 126 /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* 127 /// instructions. For lack of better number, we use the opcode number 128 /// encoding for the OPC field to identify the compare. For example, 838 129 /// is VCMPGTSH. 130 VCMP, 131 132 /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the 133 /// altivec VCMP*o instructions. For lack of better number, we use the 134 /// opcode number encoding for the OPC field to identify the compare. For 135 /// example, 838 is VCMPGTSH. 136 VCMPo, 137 138 /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This 139 /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the 140 /// condition register to branch on, OPC is the branch opcode to use (e.g. 141 /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is 142 /// an optional input flag argument. 143 COND_BRANCH, 144 145 // The following 5 instructions are used only as part of the 146 // long double-to-int conversion sequence. 147 148 /// OUTFLAG = MFFS F8RC - This moves the FPSCR (not modelled) into the 149 /// register. 150 MFFS, 151 152 /// OUTFLAG = MTFSB0 INFLAG - This clears a bit in the FPSCR. 153 MTFSB0, 154 155 /// OUTFLAG = MTFSB1 INFLAG - This sets a bit in the FPSCR. 156 MTFSB1, 157 158 /// F8RC, OUTFLAG = FADDRTZ F8RC, F8RC, INFLAG - This is an FADD done with 159 /// rounding towards zero. It has flags added so it won't move past the 160 /// FPSCR-setting instructions. 161 FADDRTZ, 162 163 /// MTFSF = F8RC, INFLAG - This moves the register into the FPSCR. 164 MTFSF, 165 166 /// LARX = This corresponds to PPC l{w|d}arx instrcution: load and 167 /// reserve indexed. 
This is used to implement atomic operations. 168 LARX, 169 170 /// STCX = This corresponds to PPC stcx. instrcution: store conditional 171 /// indexed. This is used to implement atomic operations. 172 STCX, 173 174 /// TC_RETURN - A tail call return. 175 /// operand #0 chain 176 /// operand #1 callee (register or absolute) 177 /// operand #2 stack adjustment 178 /// operand #3 optional in flag 179 TC_RETURN, 180 181 /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls 182 CR6SET, 183 CR6UNSET, 184 185 /// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec 186 /// TLS model, produces an ADDIS8 instruction that adds the GOT 187 /// base to sym@got@tprel@ha. 188 ADDIS_GOT_TPREL_HA, 189 190 /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec 191 /// TLS model, produces a LD instruction with base register G8RReg 192 /// and offset sym@got@tprel@l. This completes the addition that 193 /// finds the offset of "sym" relative to the thread pointer. 194 LD_GOT_TPREL_L, 195 196 /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS 197 /// model, produces an ADD instruction that adds the contents of 198 /// G8RReg to the thread pointer. Symbol contains a relocation 199 /// sym@tls which is to be replaced by the thread pointer and 200 /// identifies to the linker that the instruction is part of a 201 /// TLS sequence. 202 ADD_TLS, 203 204 /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS 205 /// model, produces an ADDIS8 instruction that adds the GOT base 206 /// register to sym@got@tlsgd@ha. 207 ADDIS_TLSGD_HA, 208 209 /// G8RC = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS 210 /// model, produces an ADDI8 instruction that adds G8RReg to 211 /// sym@got@tlsgd@l. 212 ADDI_TLSGD_L, 213 214 /// G8RC = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS 215 /// model, produces a call to __tls_get_addr(sym@tlsgd). 
216 GET_TLS_ADDR, 217 218 /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS 219 /// model, produces an ADDIS8 instruction that adds the GOT base 220 /// register to sym@got@tlsld@ha. 221 ADDIS_TLSLD_HA, 222 223 /// G8RC = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS 224 /// model, produces an ADDI8 instruction that adds G8RReg to 225 /// sym@got@tlsld@l. 226 ADDI_TLSLD_L, 227 228 /// G8RC = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS 229 /// model, produces a call to __tls_get_addr(sym@tlsld). 230 GET_TLSLD_ADDR, 231 232 /// G8RC = ADDIS_DTPREL_HA %X3, Symbol, Chain - For the 233 /// local-dynamic TLS model, produces an ADDIS8 instruction 234 /// that adds X3 to sym@dtprel@ha. The Chain operand is needed 235 /// to tie this in place following a copy to %X3 from the result 236 /// of a GET_TLSLD_ADDR. 237 ADDIS_DTPREL_HA, 238 239 /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS 240 /// model, produces an ADDI8 instruction that adds G8RReg to 241 /// sym@got@dtprel@l. 242 ADDI_DTPREL_L, 243 244 /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded 245 /// during instruction selection to optimize a BUILD_VECTOR into 246 /// operations on splats. This is necessary to avoid losing these 247 /// optimizations due to constant folding. 248 VADD_SPLAT, 249 250 /// STD_32 - This is the STD instruction for use with "32-bit" registers. 251 STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE, 252 253 /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a 254 /// byte-swapping store instruction. It byte-swaps the low "Type" bits of 255 /// the GPRC input, then stores it through Ptr. Type can be either i16 or 256 /// i32. 257 STBRX, 258 259 /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a 260 /// byte-swapping load instruction. It loads "Type" bits, byte swaps it, 261 /// then puts it in the bottom bits of the GPRC. TYPE can be either i16 262 /// or i32. 
263 LBRX, 264 265 /// G8RC = ADDIS_TOC_HA %X2, Symbol - For medium and large code model, 266 /// produces an ADDIS8 instruction that adds the TOC base register to 267 /// sym@toc@ha. 268 ADDIS_TOC_HA, 269 270 /// G8RC = LD_TOC_L Symbol, G8RReg - For medium and large code model, 271 /// produces a LD instruction with base register G8RReg and offset 272 /// sym@toc@l. Preceded by an ADDIS_TOC_HA to form a full 32-bit offset. 273 LD_TOC_L, 274 275 /// G8RC = ADDI_TOC_L G8RReg, Symbol - For medium code model, produces 276 /// an ADDI8 instruction that adds G8RReg to sym@toc@l. 277 /// Preceded by an ADDIS_TOC_HA to form a full 32-bit offset. 278 ADDI_TOC_L 279 }; 280 } 281 282 /// Define some predicates that are used for node matching. 283 namespace PPC { 284 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 285 /// VPKUHUM instruction. 286 bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary); 287 288 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 289 /// VPKUWUM instruction. 290 bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary); 291 292 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 293 /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 294 bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 295 bool isUnary); 296 297 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 298 /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). 299 bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 300 bool isUnary); 301 302 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 303 /// amount, otherwise return -1. 304 int isVSLDOIShuffleMask(SDNode *N, bool isUnary); 305 306 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 307 /// specifies a splat of a single element that is suitable for input to 308 /// VSPLTB/VSPLTH/VSPLTW. 
309 bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize); 310 311 /// isAllNegativeZeroVector - Returns true if all elements of build_vector 312 /// are -0.0. 313 bool isAllNegativeZeroVector(SDNode *N); 314 315 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 316 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 317 unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize); 318 319 /// get_VSPLTI_elt - If this is a build_vector of constants which can be 320 /// formed by using a vspltis[bhw] instruction of the specified element 321 /// size, return the constant being splatted. The ByteSize field indicates 322 /// the number of bytes of each element [124] -> [bhw]. 323 SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG); 324 } 325 326 class PPCTargetLowering : public TargetLowering { 327 const PPCSubtarget &PPCSubTarget; 328 const PPCRegisterInfo *PPCRegInfo; 329 330 public: 331 explicit PPCTargetLowering(PPCTargetMachine &TM); 332 333 /// getTargetNodeName() - This method returns the name of a target specific 334 /// DAG node. 335 virtual const char *getTargetNodeName(unsigned Opcode) const; 336 337 virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i32; } 338 339 /// getSetCCResultType - Return the ISD::SETCC ValueType 340 virtual EVT getSetCCResultType(EVT VT) const; 341 342 /// getPreIndexedAddressParts - returns true by value, base pointer and 343 /// offset pointer and addressing mode by reference if the node's address 344 /// can be legally represented as pre-indexed load / store address. 345 virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, 346 SDValue &Offset, 347 ISD::MemIndexedMode &AM, 348 SelectionDAG &DAG) const; 349 350 /// SelectAddressRegReg - Given the specified addressed, check to see if it 351 /// can be represented as an indexed [r+r] operation. Returns false if it 352 /// can be more efficiently represented with [r+imm]. 
353 bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, 354 SelectionDAG &DAG) const; 355 356 /// SelectAddressRegImm - Returns true if the address N can be represented 357 /// by a base register plus a signed 16-bit displacement [r+imm], and if it 358 /// is not better represented as reg+reg. 359 bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, 360 SelectionDAG &DAG) const; 361 362 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 363 /// represented as an indexed [r+r] operation. 364 bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, 365 SelectionDAG &DAG) const; 366 367 /// SelectAddressRegImmShift - Returns true if the address N can be 368 /// represented by a base register plus a signed 14-bit displacement 369 /// [r+imm*4]. Suitable for use by STD and friends. 370 bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base, 371 SelectionDAG &DAG) const; 372 373 Sched::Preference getSchedulingPreference(SDNode *N) const; 374 375 /// LowerOperation - Provide custom lowering hooks for some operations. 376 /// 377 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; 378 379 /// ReplaceNodeResults - Replace the results of node with an illegal result 380 /// type with new values built out of custom code. 
381 /// 382 virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, 383 SelectionDAG &DAG) const; 384 385 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; 386 387 virtual void computeMaskedBitsForTargetNode(const SDValue Op, 388 APInt &KnownZero, 389 APInt &KnownOne, 390 const SelectionDAG &DAG, 391 unsigned Depth = 0) const; 392 393 virtual MachineBasicBlock * 394 EmitInstrWithCustomInserter(MachineInstr *MI, 395 MachineBasicBlock *MBB) const; 396 MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI, 397 MachineBasicBlock *MBB, bool is64Bit, 398 unsigned BinOpcode) const; 399 MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI, 400 MachineBasicBlock *MBB, 401 bool is8bit, unsigned Opcode) const; 402 403 MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI, 404 MachineBasicBlock *MBB) const; 405 406 MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI, 407 MachineBasicBlock *MBB) const; 408 409 ConstraintType getConstraintType(const std::string &Constraint) const; 410 411 /// Examine constraint string and operand type and determine a weight value. 412 /// The operand object must already have been set up with the operand type. 413 ConstraintWeight getSingleConstraintMatchWeight( 414 AsmOperandInfo &info, const char *constraint) const; 415 416 std::pair<unsigned, const TargetRegisterClass*> 417 getRegForInlineAsmConstraint(const std::string &Constraint, 418 EVT VT) const; 419 420 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 421 /// function arguments in the caller parameter area. This is the actual 422 /// alignment, not its logarithm. 423 unsigned getByValTypeAlignment(Type *Ty) const; 424 425 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 426 /// vector. If it is invalid, don't add anything to Ops. 
427 virtual void LowerAsmOperandForConstraint(SDValue Op, 428 std::string &Constraint, 429 std::vector<SDValue> &Ops, 430 SelectionDAG &DAG) const; 431 432 /// isLegalAddressingMode - Return true if the addressing mode represented 433 /// by AM is legal for this target, for a load/store of the specified type. 434 virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const; 435 436 /// isLegalAddressImmediate - Return true if the integer value can be used 437 /// as the offset of the target addressing mode for load / store of the 438 /// given type. 439 virtual bool isLegalAddressImmediate(int64_t V, Type *Ty) const; 440 441 /// isLegalAddressImmediate - Return true if the GlobalValue can be used as 442 /// the offset of the target addressing mode. 443 virtual bool isLegalAddressImmediate(GlobalValue *GV) const; 444 445 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; 446 447 /// getOptimalMemOpType - Returns the target specific optimal type for load 448 /// and store operations as a result of memset, memcpy, and memmove 449 /// lowering. If DstAlign is zero that means it's safe to destination 450 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 451 /// means there isn't a need to check it against alignment requirement, 452 /// probably because the source does not need to be loaded. If 'IsMemset' is 453 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that 454 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy 455 /// source is constant so it does not need to be loaded. 456 /// It returns EVT::Other if the type should be determined using generic 457 /// target-independent logic. 
458 virtual EVT 459 getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, 460 bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, 461 MachineFunction &MF) const; 462 463 /// Is unaligned memory access allowed for the given type, and is it fast 464 /// relative to software emulation. 465 virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast = 0) const; 466 467 /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than 468 /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to 469 /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd 470 /// is expanded to mul + add. 471 virtual bool isFMAFasterThanMulAndAdd(EVT VT) const; 472 473 private: 474 SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const; 475 SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const; 476 477 bool 478 IsEligibleForTailCallOptimization(SDValue Callee, 479 CallingConv::ID CalleeCC, 480 bool isVarArg, 481 const SmallVectorImpl<ISD::InputArg> &Ins, 482 SelectionDAG& DAG) const; 483 484 SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 485 int SPDiff, 486 SDValue Chain, 487 SDValue &LROpOut, 488 SDValue &FPOpOut, 489 bool isDarwinABI, 490 DebugLoc dl) const; 491 492 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; 493 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; 494 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; 495 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; 496 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; 497 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; 498 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; 499 SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; 500 SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; 501 SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; 502 SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, 503 const 
PPCSubtarget &Subtarget) const; 504 SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG, 505 const PPCSubtarget &Subtarget) const; 506 SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 507 const PPCSubtarget &Subtarget) const; 508 SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, 509 const PPCSubtarget &Subtarget) const; 510 SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; 511 SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const; 512 SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; 513 SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; 514 SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const; 515 SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const; 516 SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const; 517 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; 518 SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; 519 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; 520 SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const; 521 SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const; 522 523 SDValue LowerCallResult(SDValue Chain, SDValue InFlag, 524 CallingConv::ID CallConv, bool isVarArg, 525 const SmallVectorImpl<ISD::InputArg> &Ins, 526 DebugLoc dl, SelectionDAG &DAG, 527 SmallVectorImpl<SDValue> &InVals) const; 528 SDValue FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool isTailCall, 529 bool isVarArg, 530 SelectionDAG &DAG, 531 SmallVector<std::pair<unsigned, SDValue>, 8> 532 &RegsToPass, 533 SDValue InFlag, SDValue Chain, 534 SDValue &Callee, 535 int SPDiff, unsigned NumBytes, 536 const SmallVectorImpl<ISD::InputArg> &Ins, 537 SmallVectorImpl<SDValue> &InVals) const; 538 539 virtual SDValue 540 LowerFormalArguments(SDValue Chain, 541 CallingConv::ID CallConv, bool isVarArg, 542 const SmallVectorImpl<ISD::InputArg> &Ins, 543 DebugLoc dl, SelectionDAG &DAG, 544 SmallVectorImpl<SDValue> &InVals) const; 
545 546 virtual SDValue 547 LowerCall(TargetLowering::CallLoweringInfo &CLI, 548 SmallVectorImpl<SDValue> &InVals) const; 549 550 virtual bool 551 CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, 552 bool isVarArg, 553 const SmallVectorImpl<ISD::OutputArg> &Outs, 554 LLVMContext &Context) const; 555 556 virtual SDValue 557 LowerReturn(SDValue Chain, 558 CallingConv::ID CallConv, bool isVarArg, 559 const SmallVectorImpl<ISD::OutputArg> &Outs, 560 const SmallVectorImpl<SDValue> &OutVals, 561 DebugLoc dl, SelectionDAG &DAG) const; 562 563 SDValue 564 extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG, 565 SDValue ArgVal, DebugLoc dl) const; 566 567 void 568 setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG, 569 unsigned nAltivecParamsAtEnd, 570 unsigned MinReservedArea, bool isPPC64) const; 571 572 SDValue 573 LowerFormalArguments_Darwin(SDValue Chain, 574 CallingConv::ID CallConv, bool isVarArg, 575 const SmallVectorImpl<ISD::InputArg> &Ins, 576 DebugLoc dl, SelectionDAG &DAG, 577 SmallVectorImpl<SDValue> &InVals) const; 578 SDValue 579 LowerFormalArguments_64SVR4(SDValue Chain, 580 CallingConv::ID CallConv, bool isVarArg, 581 const SmallVectorImpl<ISD::InputArg> &Ins, 582 DebugLoc dl, SelectionDAG &DAG, 583 SmallVectorImpl<SDValue> &InVals) const; 584 SDValue 585 LowerFormalArguments_32SVR4(SDValue Chain, 586 CallingConv::ID CallConv, bool isVarArg, 587 const SmallVectorImpl<ISD::InputArg> &Ins, 588 DebugLoc dl, SelectionDAG &DAG, 589 SmallVectorImpl<SDValue> &InVals) const; 590 591 SDValue 592 createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, 593 SDValue CallSeqStart, ISD::ArgFlagsTy Flags, 594 SelectionDAG &DAG, DebugLoc dl) const; 595 596 SDValue 597 LowerCall_Darwin(SDValue Chain, SDValue Callee, 598 CallingConv::ID CallConv, 599 bool isVarArg, bool isTailCall, 600 const SmallVectorImpl<ISD::OutputArg> &Outs, 601 const SmallVectorImpl<SDValue> &OutVals, 602 const SmallVectorImpl<ISD::InputArg> &Ins, 603 DebugLoc 
dl, SelectionDAG &DAG, 604 SmallVectorImpl<SDValue> &InVals) const; 605 SDValue 606 LowerCall_64SVR4(SDValue Chain, SDValue Callee, 607 CallingConv::ID CallConv, 608 bool isVarArg, bool isTailCall, 609 const SmallVectorImpl<ISD::OutputArg> &Outs, 610 const SmallVectorImpl<SDValue> &OutVals, 611 const SmallVectorImpl<ISD::InputArg> &Ins, 612 DebugLoc dl, SelectionDAG &DAG, 613 SmallVectorImpl<SDValue> &InVals) const; 614 SDValue 615 LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, 616 bool isVarArg, bool isTailCall, 617 const SmallVectorImpl<ISD::OutputArg> &Outs, 618 const SmallVectorImpl<SDValue> &OutVals, 619 const SmallVectorImpl<ISD::InputArg> &Ins, 620 DebugLoc dl, SelectionDAG &DAG, 621 SmallVectorImpl<SDValue> &InVals) const; 622 623 SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const; 624 SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const; 625 }; 626} 627 628#endif // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H 629