PPCISelLowering.cpp revision 0111999a88077f237c49d03c5e7891ec874b33a9
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc");

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC does not have truncstore for i1.
  setStoreXAction(MVT::i1, Promote);

  // PowerPC has pre-inc loads and stores.
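  // Illustrative note (not in the original source): a pre-increment memop
  // such as "lwzu r5, 4(r4)" loads from r4+4 and writes the updated address
  // back into r4, letting one instruction cover both the add and the load.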
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations.
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // If we're enabling GP optimizations, use hardware square root.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  // PowerPC does not have Select.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
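  // Illustrative sketch (not in the original source): the custom lowering
  // turns "int i = (int)d" into roughly
  //   fctiwz f0, f1        ; round toward zero into the low word of f0
  //   stfd   f0, tmp(r1)   ; spill the FP register
  //   lwz    r3, tmp+4(r1) ; reload the i32 result (big-endian low word)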
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    setOperationAction(ISD::LABEL, MVT::Other, Expand);
  } else {
    setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
    setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
    setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
    setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  }

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // RET must be custom lowered, to meet ABI requirements.
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // VAARG is custom lowered with the ELF 32 ABI.
  if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI())
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
  else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
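    // Illustrative note (not in the original source): FP_TO_SINT i64 can be
    // selected to fctidz (leaving the i64 result in an FP register, to be
    // stored and reloaded) and SINT_TO_FP i64 to fcfid, so only custom glue
    // code is needed rather than a libcall.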
    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand.  Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR    , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR    , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD  , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::STORE , (MVT::ValueType)VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);

      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }
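    // Illustrative note (not in the original source): the promotions above
    // mean, e.g., that an AND of two v8i16 values is recast as an AND of the
    // same 128 bits viewed as v4i32 -- bitwise ops are insensitive to element
    // boundaries, so only the v4i32 pattern needs to be selectable.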
    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setSetCCResultType(MVT::i32);
  setShiftAmountType(MVT::i32);
  setSetCCResultContents(ZeroOrOneSetCCResult);

  if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  computeRegisterProperties();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::DYNALLOC:      return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL_ELF:      return "PPCISD::CALL_ELF";
  case PPCISD::CALL_Macho:    return "PPCISD::CALL_Macho";
  case PPCISD::MTCTR:         return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Macho:   return "PPCISD::BCTRL_Macho";
  case PPCISD::BCTRL_ELF:     return "PPCISD::BCTRL_ELF";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  case PPCISD::LBRX:          return "PPCISD::LBRX";
  case PPCISD::STBRX:         return "PPCISD::STBRX";
  case PPCISD::COND_BRANCH:   return "PPCISD::COND_BRANCH";
return "PPCISD::COND_BRANCH"; 347 } 348} 349 350//===----------------------------------------------------------------------===// 351// Node matching predicates, for use by the tblgen matching code. 352//===----------------------------------------------------------------------===// 353 354/// isFloatingPointZero - Return true if this is 0.0 or -0.0. 355static bool isFloatingPointZero(SDOperand Op) { 356 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 357 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0); 358 else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) { 359 // Maybe this has already been legalized into the constant pool? 360 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 361 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 362 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0); 363 } 364 return false; 365} 366 367/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 368/// true if Op is undef or if it matches the specified value. 369static bool isConstantOrUndef(SDOperand Op, unsigned Val) { 370 return Op.getOpcode() == ISD::UNDEF || 371 cast<ConstantSDNode>(Op)->getValue() == Val; 372} 373 374/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 375/// VPKUHUM instruction. 376bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) { 377 if (!isUnary) { 378 for (unsigned i = 0; i != 16; ++i) 379 if (!isConstantOrUndef(N->getOperand(i), i*2+1)) 380 return false; 381 } else { 382 for (unsigned i = 0; i != 8; ++i) 383 if (!isConstantOrUndef(N->getOperand(i), i*2+1) || 384 !isConstantOrUndef(N->getOperand(i+8), i*2+1)) 385 return false; 386 } 387 return true; 388} 389 390/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 391/// VPKUWUM instruction. 392bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) { 393 if (!isUnary) { 394 for (unsigned i = 0; i != 16; i += 2) 395 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) || 396 !isConstantOrUndef(N->getOperand(i+1), i*2+3)) 397 return false; 398 } else { 399 for (unsigned i = 0; i != 8; i += 2) 400 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) || 401 !isConstantOrUndef(N->getOperand(i+1), i*2+3) || 402 !isConstantOrUndef(N->getOperand(i+8), i*2+2) || 403 !isConstantOrUndef(N->getOperand(i+9), i*2+3)) 404 return false; 405 } 406 return true; 407} 408 409/// isVMerge - Common function, used to match vmrg* shuffles. 410/// 411static bool isVMerge(SDNode *N, unsigned UnitSize, 412 unsigned LHSStart, unsigned RHSStart) { 413 assert(N->getOpcode() == ISD::BUILD_VECTOR && 414 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!"); 415 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 416 "Unsupported merge size!"); 417 418 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 419 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 420 if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j), 421 LHSStart+j+i*UnitSize) || 422 !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j), 423 RHSStart+j+i*UnitSize)) 424 return false; 425 } 426 return true; 427} 428 429/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 430/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}
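
/// Illustrative note (not in the original source): for EltSize == 4, a vspltw
/// splat of element 1 uses the byte mask
///   <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>,
/// and getVSPLTImmediate below returns 4/4 == 1 as the instruction immediate.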
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();

      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
}
static bool isIntS16Immediate(SDOperand Op, short &Imm) {
  return isIntS16Immediate(Op.Val, Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base,
                                            SDOperand &Index,
                                            SelectionDAG &DAG) {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.
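
    // Illustrative example (not in the original source): for
    // N = (or (shl X, 4), (and Y, 15)), every bit is known zero on at least
    // one side of the OR, so it can never carry and behaves exactly like an
    // ADD; the disjointness check below proves this with ComputeMaskedBits.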

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    uint64_t LHSKnownZero, LHSKnownOne;
    uint64_t RHSKnownZero, RHSKnownOne;
    ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero) {
      ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if ((LHSKnownZero | RHSKnownZero) == ~0U) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp,
                                            SDOperand &Base, SelectionDAG &DAG){
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      uint64_t LHSKnownZero, LHSKnownOne;
      ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getValue() == (int)CN->getValue()) {
      int Addr = (int)CN->getValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base,
                                                SDOperand &Index,
                                                SelectionDAG &DAG) {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPC::R0, N.getValueType());
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp,
                                                 SDOperand &Base,
                                                 SelectionDAG &DAG) {
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      uint64_t LHSKnownZero, LHSKnownOne;
      ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.  Verify low two bits are clear.
    if ((CN->getValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field, codegen
      // this as "d, 0".
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getValue() == (int)CN->getValue()) {
        int Addr = (int)CN->getValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);

        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}


/// getPreIndexedAddressParts - Returns true if the node's address can be
/// legally represented as a pre-indexed load/store address, returning the
/// base pointer, offset, and addressing mode by reference.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                                  SDOperand &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDOperand Ptr;
  MVT::ValueType VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getLoadedVT();

  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    ST = ST;
    Ptr = ST->getBasePtr();
    VT = ST->getStoredVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (MVT::isVector(VT))
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }
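
  // Illustrative note (not in the original source): the i64 forms (ld, ldu,
  // std, stdu) are DS-form instructions whose 14-bit displacement is
  // implicitly scaled by 4, which is why SelectAddressRegImmShift divides
  // the immediate by 4 and requires its low two bits to be clear.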
940 if (LD->getValueType(0) == MVT::i64 && LD->getLoadedVT() == MVT::i32 && 941 LD->getExtensionType() == ISD::SEXTLOAD && 942 isa<ConstantSDNode>(Offset)) 943 return false; 944 } 945 946 AM = ISD::PRE_INC; 947 return true; 948} 949 950//===----------------------------------------------------------------------===// 951// LowerOperation implementation 952//===----------------------------------------------------------------------===// 953 954static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 955 MVT::ValueType PtrVT = Op.getValueType(); 956 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 957 Constant *C = CP->getConstVal(); 958 SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); 959 SDOperand Zero = DAG.getConstant(0, PtrVT); 960 961 const TargetMachine &TM = DAG.getTarget(); 962 963 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero); 964 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero); 965 966 // If this is a non-darwin platform, we don't support non-static relo models 967 // yet. 968 if (TM.getRelocationModel() == Reloc::Static || 969 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 970 // Generate non-pic code that has direct accesses to the constant pool. 971 // The address of the global is just (hi(&g)+lo(&g)). 972 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 973 } 974 975 if (TM.getRelocationModel() == Reloc::PIC_) { 976 // With PIC, the first instruction is actually "GR+hi(&G)". 977 Hi = DAG.getNode(ISD::ADD, PtrVT, 978 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 979 } 980 981 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 982 return Lo; 983} 984 985static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 986 MVT::ValueType PtrVT = Op.getValueType(); 987 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 988 SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 989 SDOperand Zero = DAG.getConstant(0, PtrVT); 990 991 const TargetMachine &TM = DAG.getTarget(); 992 993 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero); 994 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero); 995 996 // If this is a non-darwin platform, we don't support non-static relo models 997 // yet. 998 if (TM.getRelocationModel() == Reloc::Static || 999 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 1000 // Generate non-pic code that has direct accesses to the constant pool. 1001 // The address of the global is just (hi(&g)+lo(&g)). 1002 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1003 } 1004 1005 if (TM.getRelocationModel() == Reloc::PIC_) { 1006 // With PIC, the first instruction is actually "GR+hi(&G)". 1007 Hi = DAG.getNode(ISD::ADD, PtrVT, 1008 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 1009 } 1010 1011 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1012 return Lo; 1013} 1014 1015static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 1016 MVT::ValueType PtrVT = Op.getValueType(); 1017 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1018 GlobalValue *GV = GSDN->getGlobal(); 1019 SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); 1020 SDOperand Zero = DAG.getConstant(0, PtrVT); 1021 1022 const TargetMachine &TM = DAG.getTarget(); 1023 1024 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero); 1025 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero); 1026 1027 // If this is a non-darwin platform, we don't support non-static relo models 1028 // yet. 
  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);

  if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
}

static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}
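
// Illustrative note (not in the original source): for an i32 value x, the
// seteq-zero path above computes ctlz(x) >> 5.  cntlzw yields 32 only when
// x == 0, and 32 is the only possible count with bit 5 set, so the shift
// produces exactly the desired 0/1 result.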

static SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG,
                            int VarArgsFrameIndex,
                            int VarArgsStackOffset,
                            unsigned VarArgsNumGPR,
                            unsigned VarArgsNumFPR,
                            const PPCSubtarget &Subtarget) {

  assert(0 && "VAARG in ELF32 ABI not implemented yet!");
}

static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              int VarArgsFrameIndex,
                              int VarArgsStackOffset,
                              unsigned VarArgsNumGPR,
                              unsigned VarArgsNumFPR,
                              const PPCSubtarget &Subtarget) {

  if (Subtarget.isMachoABI()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
    SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
    return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
                        SV->getOffset());
  }

  // For the ELF 32 ABI we follow the layout of the va_list struct.
  // We assume the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];


  SDOperand ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8);
  SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);


  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  SDOperand StackOffset = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);

  SDOperand ConstFrameOffset = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8,
                                               PtrVT);
  SDOperand ConstStackOffset = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8 - 1,
                                               PtrVT);
  SDOperand ConstFPROffset = DAG.getConstant(1, PtrVT);

  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));

  // Store first byte : number of int regs
  SDOperand firstStore = DAG.getStore(Op.getOperand(0), ArgGPR,
                                      Op.getOperand(1), SV->getValue(),
                                      SV->getOffset());
  SDOperand nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1),
                                  ConstFPROffset);

  // Store second byte : number of float regs
  SDOperand secondStore = DAG.getStore(firstStore, ArgFPR, nextPtr,
                                       SV->getValue(), SV->getOffset());
  nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDOperand thirdStore = DAG.getStore(secondStore, StackOffset, nextPtr,
                                      SV->getValue(), SV->getOffset());
  nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, FR, nextPtr, SV->getValue(),
                      SV->getOffset());

}
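
// Illustrative note (not in the original source): with a 4-byte pointer the
// stores above populate a 12-byte va_list -- gpr at offset 0, fpr at offset
// 1 (after ConstFPROffset = 1), overflow_arg_area at offset 4 (after
// ConstStackOffset = 3), and reg_save_area at offset 8 (after
// ConstFrameOffset = 4); offsets 2 and 3 are padding.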

#include "PPCGenCallingConv.inc"

/// GetFPR - Get the set of FP registers that should be allocated for
/// arguments, depending on which subtarget is selected.
static const unsigned *GetFPR(const PPCSubtarget &Subtarget) {
  if (Subtarget.isMachoABI()) {
    static const unsigned FPR[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
    };
    return FPR;
  }


  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };
  return FPR;
}

static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
                                       int &VarArgsFrameIndex,
                                       int &VarArgsStackOffset,
                                       unsigned &VarArgsNumGPR,
                                       unsigned &VarArgsNumFPR,
                                       const PPCSubtarget &Subtarget) {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SSARegMap *RegMap = MF.getSSARegMap();
  SmallVector<SDOperand, 8> ArgValues;
  SDOperand Root = Op.getOperand(0);

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  bool isMachoABI = Subtarget.isMachoABI();
  bool isELF32_ABI = Subtarget.isELF32_ABI();
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);

  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };

  static const unsigned *FPR = GetFPR(Subtarget);

  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = sizeof(GPR_32)/sizeof(GPR_32[0]);
  const unsigned Num_FPR_Regs = isMachoABI ? 13 : 8;
  const unsigned Num_VR_Regs  = sizeof( VR)/sizeof( VR[0]);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.
  //
  // In the ELF 32 ABI, GPRs and stack are double-word aligned: an argument
  // represented with two words (long long or double) must be copied to an
  // even GPR_idx value or to an even ArgOffset value.
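  //
  // Illustrative example (not in the original source): for
  // f(int a, long long b) under the ELF 32 ABI, a lands in r3 (GPR_idx 0)
  // while b must begin at an even GPR_idx, so r4 is skipped and b occupies
  // r5/r6.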

  for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
    SDOperand ArgVal;
    bool needsLoad = false;
    MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
    unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
    unsigned ArgSize = ObjSize;
    unsigned Flags = cast<ConstantSDNode>(Op.getOperand(ArgNo+3))->getValue();
    unsigned AlignFlag = 1 << ISD::ParamFlags::OrigAlignmentOffs;
    // See if next argument requires stack alignment in ELF
    bool Expand = (ObjectVT == MVT::f64) || ((ArgNo + 1 < e) &&
      (cast<ConstantSDNode>(Op.getOperand(ArgNo+4))->getValue() & AlignFlag) &&
      (!(Flags & AlignFlag)));

    unsigned CurArgOffset = ArgOffset;
    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i32:
      // Double word align in ELF
      if (Expand && isELF32_ABI) GPR_idx += (GPR_idx % 2);
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // Stack align in ELF
      if (needsLoad && Expand && isELF32_ABI)
        ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
      // All int arguments reserve stack space in Macho ABI.
      if (isMachoABI || needsLoad) ArgOffset += PtrByteSize;
      break;

    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        ++GPR_idx;
      } else {
        needsLoad = true;
      }
      // All int arguments reserve stack space in Macho ABI.
      if (isMachoABI || needsLoad) ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available for
      // argument passing.
      if (GPR_idx != Num_GPR_Regs && isMachoABI) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;
        if (ObjectVT == MVT::f32)
          VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
        else
          VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
        MF.addLiveIn(FPR[FPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // Stack align in ELF
      if (needsLoad && Expand && isELF32_ABI)
        ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
      // All FP arguments reserve stack space in Macho ABI.
      if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass);
        MF.addLiveIn(VR[VR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
        ++VR_idx;
      } else {
        // This should be simple, but requires getting 16-byte aligned stack
        // values.
        assert(0 && "Loading VR argument not implemented yet!");
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      // If the argument is actually used, emit a load from the right stack
      // slot.
      if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
        int FI = MFI->CreateFixedObject(ObjSize,
                                        CurArgOffset + (ArgSize - ObjSize));
        SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
        ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
      } else {
        // Don't emit a dead load.
        ArgVal = DAG.getNode(ISD::UNDEF, ObjectVT);
      }
    }

    ArgValues.push_back(ArgVal);
  }

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (isVarArg) {

    int depth;
    if (isELF32_ABI) {
      VarArgsNumGPR = GPR_idx;
      VarArgsNumFPR = FPR_idx;

      // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame
      // pointer.
      depth = -(Num_GPR_Regs * MVT::getSizeInBits(PtrVT)/8 +
                Num_FPR_Regs * MVT::getSizeInBits(MVT::f64)/8 +
                MVT::getSizeInBits(PtrVT)/8);

      VarArgsStackOffset = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
                                                  ArgOffset);

    }
    else
      depth = ArgOffset;

    VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
                                               depth);
    SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);

    SmallVector<SDOperand, 8> MemOps;

    // In the ELF 32 ABI, the fixed integer arguments of a variadic function
    // are stored to the VarArgsFrameIndex on the stack.
    if (isELF32_ABI) {
      for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) {
        SDOperand Val = DAG.getRegister(GPR[GPR_idx], PtrVT);
        SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0);
        MemOps.push_back(Store);
        // Increment the address by four for the next argument to store
        SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
        FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
      }
    }

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;
      if (isPPC64)
        VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
      else
        VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);

      MF.addLiveIn(GPR[GPR_idx], VReg);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
      FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
    }
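
    // Illustrative note (not in the original source): for a variadic callee
    // with a single named integer parameter, the loop above spills r4..r10
    // (the registers that may hold unnamed arguments) so that va_arg can
    // walk them in memory.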
1438   if (isELF32_ABI) {
1439     for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) {
1440       SDOperand Val = DAG.getRegister(FPR[FPR_idx], MVT::f64);
1441       SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0);
1442       MemOps.push_back(Store);
1443       // Increment the address by eight for the next argument to store
1444       SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8,
1445                                          PtrVT);
1446       FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1447     }
1448
1449     for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) {
1450       unsigned VReg;
1451       VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
1452
1453       MF.addLiveIn(FPR[FPR_idx], VReg);
1454       SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::f64);
1455       SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1456       MemOps.push_back(Store);
1457       // Increment the address by eight for the next argument to store
1458       SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8,
1459                                          PtrVT);
1460       FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1461     }
1462   }
1463
1464   if (!MemOps.empty())
1465     Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size());
1466 }
1467
1468 ArgValues.push_back(Root);
1469
1470 // Return the new list of results.
1471 std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
1472                                   Op.Val->value_end());
1473 return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
1474 }
1475
1476 /// isBLACompatibleAddress - Return the immediate to use if the specified
1477 /// 32-bit value is representable in the immediate field of a BxA instruction.
1478 static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) {
1479   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1480   if (!C) return 0;
1481
1482   int Addr = C->getValue();
1483   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
1484       (Addr << 6 >> 6) != Addr)
1485     return 0;  // Top 6 bits have to be sext of immediate.
1486
1487   return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
1488 }
1489
1490
1491 static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG,
1492                            const PPCSubtarget &Subtarget) {
1493   SDOperand Chain = Op.getOperand(0);
1494   bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
1495   SDOperand Callee = Op.getOperand(4);
1496   unsigned NumOps = (Op.getNumOperands() - 5) / 2;
1497
1498   bool isMachoABI = Subtarget.isMachoABI();
1499   bool isELF32_ABI = Subtarget.isELF32_ABI();
1500
1501   MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1502   bool isPPC64 = PtrVT == MVT::i64;
1503   unsigned PtrByteSize = isPPC64 ? 8 : 4;
1504
1505   // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
1506   // SelectExpr to use to put the arguments in the appropriate registers.
1507   std::vector<SDOperand> args_to_use;
1508
1509   // Count how many bytes are to be pushed on the stack, including the linkage
1510   // area, and parameter passing area.  We start with 24/48 bytes, which is
1511   // prereserved space for [SP][CR][LR][3 x unused].
1512   unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
1513
1514   // Add up all the space actually used.
1515   for (unsigned i = 0; i != NumOps; ++i) {
1516     unsigned ArgSize =MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;
1517     ArgSize = std::max(ArgSize, PtrByteSize);
1518     NumBytes += ArgSize;
1519   }
1520
1521   // The prolog code of the callee may store up to 8 GPR argument registers to
1522   // the stack, allowing va_start to index over them in memory if it is varargs.
1523   // Because we cannot tell if this is needed on the caller side, we have to
1524   // conservatively assume that it is needed.  As such, make sure we have at
1525   // least enough stack space for the caller to store the 8 GPRs.
1526   NumBytes = std::max(NumBytes,
1527                       PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
1528
1529   // Adjust the stack pointer for the new arguments...
1530   // These operations are automatically eliminated by the prolog/epilog pass
1531   Chain = DAG.getCALLSEQ_START(Chain,
1532                                DAG.getConstant(NumBytes, PtrVT));
1533
1534   // Set up a copy of the stack pointer for use in loading and storing any
1535   // arguments that may not fit in the registers available for argument
1536   // passing.
1537   SDOperand StackPtr;
1538   if (isPPC64)
1539     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
1540   else
1541     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
1542
1543   // Figure out which arguments are going to go in registers, and which in
1544   // memory.  Also, if this is a vararg function, floating point arguments
1545   // must be stored to our stack, and loaded into integer regs as well, if
1546   // any integer regs are available for argument passing.
1547   unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
1548   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
1549
1550   static const unsigned GPR_32[] = {           // 32-bit registers.
1551     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1552     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1553   };
1554   static const unsigned GPR_64[] = {           // 64-bit registers.
1555     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
1556     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
1557   };
1558   static const unsigned *FPR = GetFPR(Subtarget);
1559
1560   static const unsigned VR[] = {
1561     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
1562     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
1563   };
1564   const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]);
1565   const unsigned NumFPRs = isMachoABI ? 13 : 8;
1566   const unsigned NumVRs  = sizeof( VR)/sizeof( VR[0]);
1567
1568   const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
1569
1570   std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
1571   SmallVector<SDOperand, 8> MemOpChains;
1572   for (unsigned i = 0; i != NumOps; ++i) {
1573     bool inMem = false;
1574     SDOperand Arg = Op.getOperand(5+2*i);
1575     unsigned Flags = cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue();
1576     unsigned AlignFlag = 1 << ISD::ParamFlags::OrigAlignmentOffs;
1577     // See if next argument requires stack alignment in ELF
1578     unsigned next = 5+2*(i+1)+1;
1579     bool Expand = (Arg.getValueType() == MVT::f64) || ((i + 1 < NumOps) &&
1580       (cast<ConstantSDNode>(Op.getOperand(next))->getValue() & AlignFlag) &&
1581       (!(Flags & AlignFlag)));
1582
1583     // PtrOff will be used to store the current argument to the stack if a
1584     // register cannot be found for it.
1585     SDOperand PtrOff;
1586
1587     // Stack align in ELF 32
1588     if (isELF32_ABI && Expand)
1589       PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize,
1590                                StackPtr.getValueType());
1591     else
1592       PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
1593
1594     PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);
1595
1596     // On PPC64, promote integers to 64-bit values.
1597     if (isPPC64 && Arg.getValueType() == MVT::i32) {
1598       unsigned ExtOp = (Flags & 1) ?
ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 1599 1600 Arg = DAG.getNode(ExtOp, MVT::i64, Arg); 1601 } 1602 1603 switch (Arg.getValueType()) { 1604 default: assert(0 && "Unexpected ValueType for argument!"); 1605 case MVT::i32: 1606 case MVT::i64: 1607 // Double word align in ELF 1608 if (isELF32_ABI && Expand) GPR_idx += (GPR_idx % 2); 1609 if (GPR_idx != NumGPRs) { 1610 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 1611 } else { 1612 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1613 inMem = true; 1614 } 1615 if (inMem || isMachoABI) { 1616 // Stack align in ELF 1617 if (isELF32_ABI && Expand) 1618 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1619 1620 ArgOffset += PtrByteSize; 1621 } 1622 break; 1623 case MVT::f32: 1624 case MVT::f64: 1625 if (isVarArg) { 1626 // Float varargs need to be promoted to double. 1627 if (Arg.getValueType() == MVT::f32) 1628 Arg = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Arg); 1629 } 1630 1631 if (FPR_idx != NumFPRs) { 1632 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 1633 1634 if (isVarArg) { 1635 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 1636 MemOpChains.push_back(Store); 1637 1638 // Float varargs are always shadowed in available integer registers 1639 if (GPR_idx != NumGPRs) { 1640 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1641 MemOpChains.push_back(Load.getValue(1)); 1642 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], 1643 Load)); 1644 } 1645 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 1646 SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 1647 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour); 1648 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1649 MemOpChains.push_back(Load.getValue(1)); 1650 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], 1651 Load)); 1652 } 1653 } else { 1654 // If we have any FPRs remaining, we may also have GPRs remaining. 1655 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 1656 // GPRs. 1657 if (isMachoABI) { 1658 if (GPR_idx != NumGPRs) 1659 ++GPR_idx; 1660 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 1661 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 1662 ++GPR_idx; 1663 } 1664 } 1665 } else { 1666 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1667 inMem = true; 1668 } 1669 if (inMem || isMachoABI) { 1670 // Stack align in ELF 1671 if (isELF32_ABI && Expand) 1672 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1673 if (isPPC64) 1674 ArgOffset += 8; 1675 else 1676 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 1677 } 1678 break; 1679 case MVT::v4f32: 1680 case MVT::v4i32: 1681 case MVT::v8i16: 1682 case MVT::v16i8: 1683 assert(!isVarArg && "Don't support passing vectors to varargs yet!"); 1684 assert(VR_idx != NumVRs && 1685 "Don't support passing more than 12 vector args yet!"); 1686 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 1687 break; 1688 } 1689 } 1690 if (!MemOpChains.empty()) 1691 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1692 &MemOpChains[0], MemOpChains.size()); 1693 1694 // Build a sequence of copy-to-reg nodes chained together with token chain 1695 // and flag operands which copy the outgoing args into the appropriate regs. 
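 // Schematically, for two register arguments the loop below produces:
 //   Chain = CopyToReg(Chain, R3, Arg0)           // first copy, no glue in
 //   Chain = CopyToReg(Chain, R4, Arg1, InFlag)   // glued to the first copy
 // Each copy's flag result becomes InFlag for the next one, and the final
 // flag is threaded into the call node so the copies stay with the call.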
1696 SDOperand InFlag; 1697 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1698 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1699 InFlag); 1700 InFlag = Chain.getValue(1); 1701 } 1702 1703 // With the ELF 32 ABI, set CR6 to true if this is a vararg call. 1704 if (isVarArg && isELF32_ABI) { 1705 SDOperand SetCR(DAG.getTargetNode(PPC::SETCR, MVT::i32), 0); 1706 Chain = DAG.getCopyToReg(Chain, PPC::CR6, SetCR, InFlag); 1707 InFlag = Chain.getValue(1); 1708 } 1709 1710 std::vector<MVT::ValueType> NodeTys; 1711 NodeTys.push_back(MVT::Other); // Returns a chain 1712 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 1713 1714 SmallVector<SDOperand, 8> Ops; 1715 unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF; 1716 1717 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1718 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1719 // node so that legalize doesn't hack it. 1720 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1721 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType()); 1722 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1723 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); 1724 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) 1725 // If this is an absolute destination address, use the munged value. 1726 Callee = SDOperand(Dest, 0); 1727 else { 1728 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 1729 // to do the call, we can't use PPCISD::CALL. 1730 SDOperand MTCTROps[] = {Chain, Callee, InFlag}; 1731 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0)); 1732 InFlag = Chain.getValue(1); 1733 1734 // Copy the callee address into R12 on darwin. 1735 if (isMachoABI) { 1736 Chain = DAG.getCopyToReg(Chain, PPC::R12, Callee, InFlag); 1737 InFlag = Chain.getValue(1); 1738 } 1739 1740 NodeTys.clear(); 1741 NodeTys.push_back(MVT::Other); 1742 NodeTys.push_back(MVT::Flag); 1743 Ops.push_back(Chain); 1744 CallOpc = isMachoABI ? PPCISD::BCTRL_Macho : PPCISD::BCTRL_ELF; 1745 Callee.Val = 0; 1746 } 1747 1748 // If this is a direct call, pass the chain and the callee. 1749 if (Callee.Val) { 1750 Ops.push_back(Chain); 1751 Ops.push_back(Callee); 1752 } 1753 1754 // Add argument registers to the end of the list so that they are known live 1755 // into the call. 1756 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1757 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1758 RegsToPass[i].second.getValueType())); 1759 1760 if (InFlag.Val) 1761 Ops.push_back(InFlag); 1762 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size()); 1763 InFlag = Chain.getValue(1); 1764 1765 SDOperand ResultVals[3]; 1766 unsigned NumResults = 0; 1767 NodeTys.clear(); 1768 1769 // If the call has results, copy the values out of the ret val registers. 
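 // For reference, the return registers assumed by the switch below: i32 in
 // R3; an i64 on a 32-bit target split across R3 (high word, big-endian) and
 // R4 (low word, copied out first); i64 in X3; f32/f64 in F1; vectors in V2.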
1770   switch (Op.Val->getValueType(0)) {
1771   default: assert(0 && "Unexpected ret value!");
1772   case MVT::Other: break;
1773   case MVT::i32:
1774     if (Op.Val->getValueType(1) == MVT::i32) {
1775       Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1);
1776       ResultVals[0] = Chain.getValue(0);
1777       Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32,
1778                                  Chain.getValue(2)).getValue(1);
1779       ResultVals[1] = Chain.getValue(0);
1780       NumResults = 2;
1781       NodeTys.push_back(MVT::i32);
1782     } else {
1783       Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1);
1784       ResultVals[0] = Chain.getValue(0);
1785       NumResults = 1;
1786     }
1787     NodeTys.push_back(MVT::i32);
1788     break;
1789   case MVT::i64:
1790     Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1);
1791     ResultVals[0] = Chain.getValue(0);
1792     NumResults = 1;
1793     NodeTys.push_back(MVT::i64);
1794     break;
1795   case MVT::f32:
1796   case MVT::f64:
1797     Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0),
1798                                InFlag).getValue(1);
1799     ResultVals[0] = Chain.getValue(0);
1800     NumResults = 1;
1801     NodeTys.push_back(Op.Val->getValueType(0));
1802     break;
1803   case MVT::v4f32:
1804   case MVT::v4i32:
1805   case MVT::v8i16:
1806   case MVT::v16i8:
1807     Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0),
1808                                InFlag).getValue(1);
1809     ResultVals[0] = Chain.getValue(0);
1810     NumResults = 1;
1811     NodeTys.push_back(Op.Val->getValueType(0));
1812     break;
1813   }
1814
1815   Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
1816                       DAG.getConstant(NumBytes, PtrVT));
1817   NodeTys.push_back(MVT::Other);
1818
1819   // If the function returns void, just return the chain.
1820   if (NumResults == 0)
1821     return Chain;
1822
1823   // Otherwise, merge everything together with a MERGE_VALUES node.
1824   ResultVals[NumResults++] = Chain;
1825   SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
1826                               ResultVals, NumResults);
1827   return Res.getValue(Op.ResNo);
1828 }
1829
1830 static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG, TargetMachine &TM) {
1831   SmallVector<CCValAssign, 16> RVLocs;
1832   unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
1833   CCState CCInfo(CC, TM, RVLocs);
1834   CCInfo.AnalyzeReturn(Op.Val, RetCC_PPC);
1835
1836   // If this is the first return lowered for this function, add the regs to the
1837   // liveout set for the function.
1838   if (DAG.getMachineFunction().liveout_empty()) {
1839     for (unsigned i = 0; i != RVLocs.size(); ++i)
1840       DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
1841   }
1842
1843   SDOperand Chain = Op.getOperand(0);
1844   SDOperand Flag;
1845
1846   // Copy the result values into the output registers.
1847   for (unsigned i = 0; i != RVLocs.size(); ++i) {
1848     CCValAssign &VA = RVLocs[i];
1849     assert(VA.isRegLoc() && "Can only return in registers!");
1850     Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), Flag);
1851     Flag = Chain.getValue(1);
1852   }
1853
1854   if (Flag.Val)
1855     return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain, Flag);
1856   else
1857     return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain);
1858 }
1859
1860 static SDOperand LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG,
1861                                    const PPCSubtarget &Subtarget) {
1862   // When we pop the dynamic allocation we need to restore the SP link.
1863
1864   // Get the correct type for pointers.
1865   MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1866
1867   // Construct the stack pointer operand.
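 // A rough sketch of the full sequence this function builds, in 32-bit
 // pseudo-assembly:
 //   lwz  tmp, 0(r1)    ; load the old back-chain link
 //   mr   r1, SaveSP    ; pop the dynamically allocated area
 //   stw  tmp, 0(r1)    ; re-establish the link at the new stack top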
1868   bool IsPPC64 = Subtarget.isPPC64();
1869   unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1;
1870   SDOperand StackPtr = DAG.getRegister(SP, PtrVT);
1871
1872   // Get the operands for the STACKRESTORE.
1873   SDOperand Chain = Op.getOperand(0);
1874   SDOperand SaveSP = Op.getOperand(1);
1875
1876   // Load the old link SP.
1877   SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0);
1878
1879   // Restore the stack pointer.
1880   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP);
1881
1882   // Store the old link SP.
1883   return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0);
1884 }
1885
1886 static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG,
1887                                          const PPCSubtarget &Subtarget) {
1888   MachineFunction &MF = DAG.getMachineFunction();
1889   bool IsPPC64 = Subtarget.isPPC64();
1890   bool isMachoABI = Subtarget.isMachoABI();
1891
1892   // Get current frame pointer save index.  The users of this index will be
1893   // primarily DYNALLOC instructions.
1894   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1895   int FPSI = FI->getFramePointerSaveIndex();
1896
1897   // If the frame pointer save index hasn't been defined yet, set it up now.
1898   if (!FPSI) {
1899     // Find out the fixed offset of the frame pointer save area.
1900     int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, isMachoABI);
1901
1902     // Allocate the frame index for frame pointer save area.
1903     FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset);
1904     // Save the result.
1905     FI->setFramePointerSaveIndex(FPSI);
1906   }
1907
1908   // Get the inputs.
1909   SDOperand Chain = Op.getOperand(0);
1910   SDOperand Size  = Op.getOperand(1);
1911
1912   // Get the correct type for pointers.
1913   MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1914   // Negate the size.
1915   SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT,
1916                                   DAG.getConstant(0, PtrVT), Size);
1917   // Construct a node for the frame pointer save index.
1918   SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT);
1919   // Build a DYNALLOC node.
1920   SDOperand Ops[3] = { Chain, NegSize, FPSIdx };
1921   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
1922   return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
1923 }
1924
1925
1926 /// LowerSELECT_CC - Lower floating point select_cc's into the fsel instruction
1927 /// when possible.
1928 static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
1929   // Not FP? Not a fsel.
1930   if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
1931       !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
1932     return SDOperand();
1933
1934   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
1935
1936   // Cannot handle SETEQ/SETNE.
1937   if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();
1938
1939   MVT::ValueType ResVT = Op.getValueType();
1940   MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
1941   SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1942   SDOperand TV  = Op.getOperand(2), FV  = Op.getOperand(3);
1943
1944   // If the RHS of the comparison is a 0.0, we don't need to do the
1945   // subtraction at all.
1946   if (isFloatingPointZero(RHS))
1947     switch (CC) {
1948     default: break;       // SETUO etc aren't handled by fsel.
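     // fsel computes FRT = (FRA >= -0.0) ? FRC : FRB, i.e. a setge-against-
     // zero select.  For example, select_cc(setge L, 0.0, T, F) maps directly
     // to fsel L, T, F; setlt is obtained by swapping T and F, as below.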
1949     case ISD::SETULT:
1950     case ISD::SETOLT:
1951     case ISD::SETLT:
1952       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
1953     case ISD::SETUGE:
1954     case ISD::SETOGE:
1955     case ISD::SETGE:
1956       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
1957         LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
1958       return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
1959     case ISD::SETUGT:
1960     case ISD::SETOGT:
1961     case ISD::SETGT:
1962       std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
1963     case ISD::SETULE:
1964     case ISD::SETOLE:
1965     case ISD::SETLE:
1966       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
1967         LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
1968       return DAG.getNode(PPCISD::FSEL, ResVT,
1969                          DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
1970     }
1971
1972   SDOperand Cmp;
1973   switch (CC) {
1974   default: break;       // SETUO etc aren't handled by fsel.
1975   case ISD::SETULT:
1976   case ISD::SETOLT:
1977   case ISD::SETLT:
1978     Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
1979     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
1980       Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1981     return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
1982   case ISD::SETUGE:
1983   case ISD::SETOGE:
1984   case ISD::SETGE:
1985     Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
1986     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
1987       Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1988     return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
1989   case ISD::SETUGT:
1990   case ISD::SETOGT:
1991   case ISD::SETGT:
1992     Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
1993     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
1994       Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
1995     return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
1996   case ISD::SETULE:
1997   case ISD::SETOLE:
1998   case ISD::SETLE:
1999     Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
2000     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
2001       Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2002     return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
2003   }
2004   return SDOperand();
2005 }
2006
2007 static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
2008   assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
2009   SDOperand Src = Op.getOperand(0);
2010   if (Src.getValueType() == MVT::f32)
2011     Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
2012
2013   SDOperand Tmp;
2014   switch (Op.getValueType()) {
2015   default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
2016   case MVT::i32:
2017     Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
2018     break;
2019   case MVT::i64:
2020     Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
2021     break;
2022   }
2023
2024   // Convert the FP value to an int value through memory.
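   // (Roughly, once the BIT_CONVERT below is legalized through a stack slot,
   //  the generated code for the i32 case is:
   //    fctiwz f0, f1       ; or fctidz for an i64 result
   //    stfd   f0, X(r1)
   //    lwz    r3, X+4(r1)  ; the int sits in the low word of the double slot
   //  where X is a hypothetical stack offset.)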
2025 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp); 2026 if (Op.getValueType() == MVT::i32) 2027 Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits); 2028 return Bits; 2029} 2030 2031static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 2032 if (Op.getOperand(0).getValueType() == MVT::i64) { 2033 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); 2034 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); 2035 if (Op.getValueType() == MVT::f32) 2036 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); 2037 return FP; 2038 } 2039 2040 assert(Op.getOperand(0).getValueType() == MVT::i32 && 2041 "Unhandled SINT_TO_FP type in custom expander!"); 2042 // Since we only generate this in 64-bit mode, we can take advantage of 2043 // 64-bit registers. In particular, sign extend the input value into the 2044 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 2045 // then lfd it and fcfid it. 2046 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 2047 int FrameIdx = FrameInfo->CreateStackObject(8, 8); 2048 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2049 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 2050 2051 SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, 2052 Op.getOperand(0)); 2053 2054 // STD the extended value into the stack slot. 2055 SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other, 2056 DAG.getEntryNode(), Ext64, FIdx, 2057 DAG.getSrcValue(NULL)); 2058 // Load the value as a double. 2059 SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0); 2060 2061 // FCFID it and return it. 2062 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); 2063 if (Op.getValueType() == MVT::f32) 2064 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); 2065 return FP; 2066} 2067 2068static SDOperand LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { 2069 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 2070 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!"); 2071 2072 // Expand into a bunch of logical ops. Note that these ops 2073 // depend on the PPC behavior for oversized shift amounts. 2074 SDOperand Lo = Op.getOperand(0); 2075 SDOperand Hi = Op.getOperand(1); 2076 SDOperand Amt = Op.getOperand(2); 2077 2078 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 2079 DAG.getConstant(32, MVT::i32), Amt); 2080 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt); 2081 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1); 2082 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 2083 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 2084 DAG.getConstant(-32U, MVT::i32)); 2085 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5); 2086 SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); 2087 SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt); 2088 SDOperand OutOps[] = { OutLo, OutHi }; 2089 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 2090 OutOps, 2); 2091} 2092 2093static SDOperand LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { 2094 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 2095 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!"); 2096 2097 // Otherwise, expand into a bunch of logical ops. Note that these ops 2098 // depend on the PPC behavior for oversized shift amounts. 
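 // The expansion below computes, for a 64-bit logical shift right:
 //   OutLo = (Lo >>u Amt) | (Hi << (32-Amt)) | (Hi >>u (Amt-32))
 //   OutHi =  Hi >>u Amt
 // Because PPC's 32-bit shifts produce 0 for amounts in [32,63], at most one
 // of the Hi terms contributes; e.g. for Amt == 40 only Hi >>u 8 survives.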
2099 SDOperand Lo = Op.getOperand(0); 2100 SDOperand Hi = Op.getOperand(1); 2101 SDOperand Amt = Op.getOperand(2); 2102 2103 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 2104 DAG.getConstant(32, MVT::i32), Amt); 2105 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); 2106 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); 2107 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 2108 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 2109 DAG.getConstant(-32U, MVT::i32)); 2110 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5); 2111 SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); 2112 SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt); 2113 SDOperand OutOps[] = { OutLo, OutHi }; 2114 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 2115 OutOps, 2); 2116} 2117 2118static SDOperand LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { 2119 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 2120 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!"); 2121 2122 // Otherwise, expand into a bunch of logical ops, followed by a select_cc. 2123 SDOperand Lo = Op.getOperand(0); 2124 SDOperand Hi = Op.getOperand(1); 2125 SDOperand Amt = Op.getOperand(2); 2126 2127 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 2128 DAG.getConstant(32, MVT::i32), Amt); 2129 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); 2130 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); 2131 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 2132 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 2133 DAG.getConstant(-32U, MVT::i32)); 2134 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5); 2135 SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt); 2136 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32), 2137 Tmp4, Tmp6, ISD::SETLE); 2138 SDOperand OutOps[] = { OutLo, OutHi }; 2139 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 2140 OutOps, 2); 2141} 2142 2143//===----------------------------------------------------------------------===// 2144// Vector related lowering. 2145// 2146 2147// If this is a vector of constants or undefs, get the bits. A bit in 2148// UndefBits is set if the corresponding element of the vector is an 2149// ISD::UNDEF value. For undefs, the corresponding VectorBits values are 2150// zero. Return true if this is not an array of constants, false if it is. 2151// 2152static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], 2153 uint64_t UndefBits[2]) { 2154 // Start with zero'd results. 2155 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; 2156 2157 unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); 2158 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2159 SDOperand OpVal = BV->getOperand(i); 2160 2161 unsigned PartNo = i >= e/2; // In the upper 128 bits? 2162 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t. 
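     // (e.g. for a v16i8 build_vector, e == 16: element 0 gets PartNo == 0
     //  and SlotNo == 7, i.e. the most-significant byte of VectorBits[0];
     //  elements are packed big-endian within each 64-bit half.)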
2163 2164 uint64_t EltBits = 0; 2165 if (OpVal.getOpcode() == ISD::UNDEF) { 2166 uint64_t EltUndefBits = ~0U >> (32-EltBitSize); 2167 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); 2168 continue; 2169 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 2170 EltBits = CN->getValue() & (~0U >> (32-EltBitSize)); 2171 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 2172 assert(CN->getValueType(0) == MVT::f32 && 2173 "Only one legal FP vector type!"); 2174 EltBits = FloatToBits(CN->getValue()); 2175 } else { 2176 // Nonconstant element. 2177 return true; 2178 } 2179 2180 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); 2181 } 2182 2183 //printf("%llx %llx %llx %llx\n", 2184 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); 2185 return false; 2186} 2187 2188// If this is a splat (repetition) of a value across the whole vector, return 2189// the smallest size that splats it. For example, "0x01010101010101..." is a 2190// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 2191// SplatSize = 1 byte. 2192static bool isConstantSplat(const uint64_t Bits128[2], 2193 const uint64_t Undef128[2], 2194 unsigned &SplatBits, unsigned &SplatUndef, 2195 unsigned &SplatSize) { 2196 2197 // Don't let undefs prevent splats from matching. See if the top 64-bits are 2198 // the same as the lower 64-bits, ignoring undefs. 2199 if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0])) 2200 return false; // Can't be a splat if two pieces don't match. 2201 2202 uint64_t Bits64 = Bits128[0] | Bits128[1]; 2203 uint64_t Undef64 = Undef128[0] & Undef128[1]; 2204 2205 // Check that the top 32-bits are the same as the lower 32-bits, ignoring 2206 // undefs. 2207 if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64)) 2208 return false; // Can't be a splat if two pieces don't match. 2209 2210 uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32); 2211 uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32); 2212 2213 // If the top 16-bits are different than the lower 16-bits, ignoring 2214 // undefs, we have an i32 splat. 2215 if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) { 2216 SplatBits = Bits32; 2217 SplatUndef = Undef32; 2218 SplatSize = 4; 2219 return true; 2220 } 2221 2222 uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16); 2223 uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16); 2224 2225 // If the top 8-bits are different than the lower 8-bits, ignoring 2226 // undefs, we have an i16 splat. 2227 if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) { 2228 SplatBits = Bits16; 2229 SplatUndef = Undef16; 2230 SplatSize = 2; 2231 return true; 2232 } 2233 2234 // Otherwise, we have an 8-bit splat. 2235 SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8); 2236 SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8); 2237 SplatSize = 1; 2238 return true; 2239} 2240 2241/// BuildSplatI - Build a canonical splati of Val with an element size of 2242/// SplatSize. Cast the result to VT. 2243static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, 2244 SelectionDAG &DAG) { 2245 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 2246 2247 static const MVT::ValueType VTys[] = { // canonical VT to use for each size. 2248 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 2249 }; 2250 2251 MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 2252 2253 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 
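   // (vspltisb -1, vspltish -1 and vspltisw -1 all set every bit of the
   //  result, so the byte-sized form can represent any of them.)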
2254 if (Val == -1) 2255 SplatSize = 1; 2256 2257 MVT::ValueType CanonicalVT = VTys[SplatSize-1]; 2258 2259 // Build a canonical splat for this value. 2260 SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT)); 2261 SmallVector<SDOperand, 8> Ops; 2262 Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt); 2263 SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, 2264 &Ops[0], Ops.size()); 2265 return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); 2266} 2267 2268/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 2269/// specified intrinsic ID. 2270static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, 2271 SelectionDAG &DAG, 2272 MVT::ValueType DestVT = MVT::Other) { 2273 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 2274 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 2275 DAG.getConstant(IID, MVT::i32), LHS, RHS); 2276} 2277 2278/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 2279/// specified intrinsic ID. 2280static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, 2281 SDOperand Op2, SelectionDAG &DAG, 2282 MVT::ValueType DestVT = MVT::Other) { 2283 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 2284 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 2285 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 2286} 2287 2288 2289/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 2290/// amount. The result has the specified value type. 2291static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, 2292 MVT::ValueType VT, SelectionDAG &DAG) { 2293 // Force LHS/RHS to be the right type. 2294 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); 2295 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); 2296 2297 SDOperand Ops[16]; 2298 for (unsigned i = 0; i != 16; ++i) 2299 Ops[i] = DAG.getConstant(i+Amt, MVT::i32); 2300 SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, 2301 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16)); 2302 return DAG.getNode(ISD::BIT_CONVERT, VT, T); 2303} 2304 2305// If this is a case we can't handle, return null and let the default 2306// expansion code take care of it. If we CAN select this case, and if it 2307// selects to a single instruction, return Op. Otherwise, if we can codegen 2308// this case more efficiently than a constant pool load, lower it to the 2309// sequence of ops that should be used. 2310static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2311 // If this is a vector of constants or undefs, get the bits. A bit in 2312 // UndefBits is set if the corresponding element of the vector is an 2313 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are 2314 // zero. 2315 uint64_t VectorBits[2]; 2316 uint64_t UndefBits[2]; 2317 if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)) 2318 return SDOperand(); // Not a constant vector. 2319 2320 // If this is a splat (repetition) of a value across the whole vector, return 2321 // the smallest size that splats it. For example, "0x01010101010101..." is a 2322 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 2323 // SplatSize = 1 byte. 2324 unsigned SplatBits, SplatUndef, SplatSize; 2325 if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){ 2326 bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0; 2327 2328 // First, handle single instruction cases. 2329 2330 // All zeros? 
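     // (A zero vector takes a single instruction regardless of type, e.g. a
     //  vspltisw of 0 or a vxor of a register with itself; canonicalizing on
     //  v4i32 just gives CSE and selection one form to match.)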
2331     if (SplatBits == 0) {
2332       // Canonicalize all zero vectors to be v4i32.
2333       if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
2334         SDOperand Z = DAG.getConstant(0, MVT::i32);
2335         Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
2336         Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
2337       }
2338       return Op;
2339     }
2340
2341     // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
2342     int32_t SextVal = int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
2343     if (SextVal >= -16 && SextVal <= 15)
2344       return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);
2345
2346
2347     // Two instruction sequences.
2348
2349     // If this value is in the range [-32,30] and is even, use:
2350     //    tmp = VSPLTI[bhw], result = add tmp, tmp
2351     if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
2352       Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
2353       return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
2354     }
2355
2356     // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
2357     // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
2358     // for fneg/fabs.
2359     if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
2360       // Make a -1 splat using vspltisw -1:
2361       SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);
2362
2363       // Make the VSLW intrinsic, computing 0x8000_0000.
2364       SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
2365                                        OnesV, DAG);
2366
2367       // xor by OnesV to invert it.
2368       Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
2369       return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
2370     }
2371
2372     // Check to see if this is a wide variety of vsplti*, binop self cases.
2373     unsigned SplatBitSize = SplatSize*8;
2374     static const signed char SplatCsts[] = {
2375       -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
2376       -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
2377     };
2378
2379     for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
2380       // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
2381       // cases which are ambiguous (e.g. formation of 0x8000_0000).
2382       int i = SplatCsts[idx];
2383
2384       // Figure out what shift amount will be used by altivec if shifted by i in
2385       // this splat size.
2386       unsigned TypeShiftAmt = i & (SplatBitSize-1);
2387
2388       // vsplti + shl self.
2389       if (SextVal == (i << (int)TypeShiftAmt)) {
2390         SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
2391         static const unsigned IIDs[] = { // Intrinsic to use for each size.
2392           Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
2393           Intrinsic::ppc_altivec_vslw
2394         };
2395         Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
2396         return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
2397       }
2398
2399       // vsplti + srl self.
2400       if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
2401         SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
2402         static const unsigned IIDs[] = { // Intrinsic to use for each size.
2403           Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
2404           Intrinsic::ppc_altivec_vsrw
2405         };
2406         Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
2407         return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
2408       }
2409
2410       // vsplti + sra self.
2411 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 2412 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 2413 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2414 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 2415 Intrinsic::ppc_altivec_vsraw 2416 }; 2417 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 2418 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2419 } 2420 2421 // vsplti + rol self. 2422 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 2423 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 2424 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 2425 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2426 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 2427 Intrinsic::ppc_altivec_vrlw 2428 }; 2429 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 2430 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2431 } 2432 2433 // t = vsplti c, result = vsldoi t, t, 1 2434 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) { 2435 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2436 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG); 2437 } 2438 // t = vsplti c, result = vsldoi t, t, 2 2439 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) { 2440 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2441 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG); 2442 } 2443 // t = vsplti c, result = vsldoi t, t, 3 2444 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) { 2445 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2446 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG); 2447 } 2448 } 2449 2450 // Three instruction sequences. 2451 2452 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 2453 if (SextVal >= 0 && SextVal <= 31) { 2454 SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG); 2455 SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); 2456 LHS = DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS); 2457 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); 2458 } 2459 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 2460 if (SextVal >= -31 && SextVal <= 0) { 2461 SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG); 2462 SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); 2463 LHS = DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS); 2464 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); 2465 } 2466 } 2467 2468 return SDOperand(); 2469} 2470 2471/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 2472/// the specified operations to build the shuffle. 
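/// Each PFEntry packs a cost in bits [31:30], an opcode in bits [29:26], and
/// two 13-bit shuffle IDs.  An ID encodes four element selectors as base-9
/// digits, where 0-7 pick an input element and 8 means undef; e.g. the
/// identity <0,1,2,3> is ((0*9+1)*9+2)*9+3 == 102, which is the (1*9+2)*9+3
/// that the OP_COPY case compares LHSID against.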
2473static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, 2474 SDOperand RHS, SelectionDAG &DAG) { 2475 unsigned OpNum = (PFEntry >> 26) & 0x0F; 2476 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 2477 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 2478 2479 enum { 2480 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 2481 OP_VMRGHW, 2482 OP_VMRGLW, 2483 OP_VSPLTISW0, 2484 OP_VSPLTISW1, 2485 OP_VSPLTISW2, 2486 OP_VSPLTISW3, 2487 OP_VSLDOI4, 2488 OP_VSLDOI8, 2489 OP_VSLDOI12 2490 }; 2491 2492 if (OpNum == OP_COPY) { 2493 if (LHSID == (1*9+2)*9+3) return LHS; 2494 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 2495 return RHS; 2496 } 2497 2498 SDOperand OpLHS, OpRHS; 2499 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG); 2500 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG); 2501 2502 unsigned ShufIdxs[16]; 2503 switch (OpNum) { 2504 default: assert(0 && "Unknown i32 permute!"); 2505 case OP_VMRGHW: 2506 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 2507 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 2508 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 2509 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 2510 break; 2511 case OP_VMRGLW: 2512 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 2513 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 2514 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 2515 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 2516 break; 2517 case OP_VSPLTISW0: 2518 for (unsigned i = 0; i != 16; ++i) 2519 ShufIdxs[i] = (i&3)+0; 2520 break; 2521 case OP_VSPLTISW1: 2522 for (unsigned i = 0; i != 16; ++i) 2523 ShufIdxs[i] = (i&3)+4; 2524 break; 2525 case OP_VSPLTISW2: 2526 for (unsigned i = 0; i != 16; ++i) 2527 ShufIdxs[i] = (i&3)+8; 2528 break; 2529 case OP_VSPLTISW3: 2530 for (unsigned i = 0; i != 16; ++i) 2531 ShufIdxs[i] = (i&3)+12; 2532 break; 2533 case OP_VSLDOI4: 2534 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG); 2535 case OP_VSLDOI8: 2536 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG); 2537 case OP_VSLDOI12: 2538 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG); 2539 } 2540 SDOperand Ops[16]; 2541 for (unsigned i = 0; i != 16; ++i) 2542 Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i32); 2543 2544 return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS, 2545 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 2546} 2547 2548/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 2549/// is a shuffle we can handle in a single instruction, return it. Otherwise, 2550/// return the code it can be lowered into. Worst case, it can always be 2551/// lowered into a vperm. 2552static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 2553 SDOperand V1 = Op.getOperand(0); 2554 SDOperand V2 = Op.getOperand(1); 2555 SDOperand PermMask = Op.getOperand(2); 2556 2557 // Cases that are handled by instructions that take permute immediates 2558 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 2559 // selected by the instruction selector. 
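 // For example, the v16i8 mask <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7> is a
 // 4-byte splat of word 1 and is matched directly to 'vspltw 1' by the
 // selector, so rewriting it as a vperm would only add a mask load.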
2560 if (V2.getOpcode() == ISD::UNDEF) { 2561 if (PPC::isSplatShuffleMask(PermMask.Val, 1) || 2562 PPC::isSplatShuffleMask(PermMask.Val, 2) || 2563 PPC::isSplatShuffleMask(PermMask.Val, 4) || 2564 PPC::isVPKUWUMShuffleMask(PermMask.Val, true) || 2565 PPC::isVPKUHUMShuffleMask(PermMask.Val, true) || 2566 PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 || 2567 PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) || 2568 PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) || 2569 PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) || 2570 PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) || 2571 PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) || 2572 PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) { 2573 return Op; 2574 } 2575 } 2576 2577 // Altivec has a variety of "shuffle immediates" that take two vector inputs 2578 // and produce a fixed permutation. If any of these match, do not lower to 2579 // VPERM. 2580 if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) || 2581 PPC::isVPKUHUMShuffleMask(PermMask.Val, false) || 2582 PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 || 2583 PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) || 2584 PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) || 2585 PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) || 2586 PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) || 2587 PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) || 2588 PPC::isVMRGHShuffleMask(PermMask.Val, 4, false)) 2589 return Op; 2590 2591 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 2592 // perfect shuffle table to emit an optimal matching sequence. 2593 unsigned PFIndexes[4]; 2594 bool isFourElementShuffle = true; 2595 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 2596 unsigned EltNo = 8; // Start out undef. 2597 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 2598 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF) 2599 continue; // Undef, ignore it. 2600 2601 unsigned ByteSource = 2602 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue(); 2603 if ((ByteSource & 3) != j) { 2604 isFourElementShuffle = false; 2605 break; 2606 } 2607 2608 if (EltNo == 8) { 2609 EltNo = ByteSource/4; 2610 } else if (EltNo != ByteSource/4) { 2611 isFourElementShuffle = false; 2612 break; 2613 } 2614 } 2615 PFIndexes[i] = EltNo; 2616 } 2617 2618 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 2619 // perfect shuffle vector to determine if it is cost effective to do this as 2620 // discrete instructions, or whether we should use a vperm. 2621 if (isFourElementShuffle) { 2622 // Compute the index in the perfect shuffle table. 2623 unsigned PFTableIndex = 2624 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 2625 2626 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 2627 unsigned Cost = (PFEntry >> 30); 2628 2629 // Determining when to avoid vperm is tricky. Many things affect the cost 2630 // of vperm, particularly how many times the perm mask needs to be computed. 2631 // For example, if the perm mask can be hoisted out of a loop or is already 2632 // used (perhaps because there are multiple permutes with the same shuffle 2633 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 2634 // the loop requires an extra register. 2635 // 2636 // As a compromise, we only emit discrete instructions if the shuffle can be 2637 // generated in 3 or fewer operations. 
When we have loop information 2638 // available, if this block is within a loop, we should avoid using vperm 2639 // for 3-operation perms and use a constant pool load instead. 2640 if (Cost < 3) 2641 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG); 2642 } 2643 2644 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 2645 // vector that will get spilled to the constant pool. 2646 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 2647 2648 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 2649 // that it is in input element units, not in bytes. Convert now. 2650 MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType()); 2651 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; 2652 2653 SmallVector<SDOperand, 16> ResultMask; 2654 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { 2655 unsigned SrcElt; 2656 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) 2657 SrcElt = 0; 2658 else 2659 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue(); 2660 2661 for (unsigned j = 0; j != BytesPerElement; ++j) 2662 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 2663 MVT::i8)); 2664 } 2665 2666 SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, 2667 &ResultMask[0], ResultMask.size()); 2668 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); 2669} 2670 2671/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 2672/// altivec comparison. If it is, return true and fill in Opc/isDot with 2673/// information about the intrinsic. 2674static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc, 2675 bool &isDot) { 2676 unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue(); 2677 CompareOpc = -1; 2678 isDot = false; 2679 switch (IntrinsicID) { 2680 default: return false; 2681 // Comparison predicates. 2682 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 2683 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 2684 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 2685 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 2686 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 2687 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 2688 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 2689 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 2690 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 2691 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 2692 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 2693 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 2694 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 2695 2696 // Normal Comparisons. 
2697 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 2698 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 2699 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 2700 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 2701 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 2702 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 2703 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 2704 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 2705 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 2706 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 2707 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 2708 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 2709 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 2710 } 2711 return true; 2712} 2713 2714/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 2715/// lower, do it, otherwise return null. 2716static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 2717 // If this is a lowered altivec predicate compare, CompareOpc is set to the 2718 // opcode number of the comparison. 2719 int CompareOpc; 2720 bool isDot; 2721 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 2722 return SDOperand(); // Don't custom lower most intrinsics. 2723 2724 // If this is a non-dot comparison, make the VCMP node and we are done. 2725 if (!isDot) { 2726 SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), 2727 Op.getOperand(1), Op.getOperand(2), 2728 DAG.getConstant(CompareOpc, MVT::i32)); 2729 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp); 2730 } 2731 2732 // Create the PPCISD altivec 'dot' comparison node. 2733 SDOperand Ops[] = { 2734 Op.getOperand(2), // LHS 2735 Op.getOperand(3), // RHS 2736 DAG.getConstant(CompareOpc, MVT::i32) 2737 }; 2738 std::vector<MVT::ValueType> VTs; 2739 VTs.push_back(Op.getOperand(2).getValueType()); 2740 VTs.push_back(MVT::Flag); 2741 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); 2742 2743 // Now that we have the comparison, emit a copy from the CR to a GPR. 2744 // This is flagged to the above dot comparison. 2745 SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, 2746 DAG.getRegister(PPC::CR6, MVT::i32), 2747 CompNode.getValue(1)); 2748 2749 // Unpack the result based on how the target uses it. 2750 unsigned BitNo; // Bit # of CR6. 2751 bool InvertBit; // Invert result? 2752 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) { 2753 default: // Can't happen, don't crash on invalid number though. 2754 case 0: // Return the value of the EQ bit of CR6. 2755 BitNo = 0; InvertBit = false; 2756 break; 2757 case 1: // Return the inverted value of the EQ bit of CR6. 2758 BitNo = 0; InvertBit = true; 2759 break; 2760 case 2: // Return the value of the LT bit of CR6. 2761 BitNo = 2; InvertBit = false; 2762 break; 2763 case 3: // Return the inverted value of the LT bit of CR6. 2764 BitNo = 2; InvertBit = true; 2765 break; 2766 } 2767 2768 // Shift the bit into the low position. 2769 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags, 2770 DAG.getConstant(8-(3-BitNo), MVT::i32)); 2771 // Isolate the bit. 
2772 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags, 2773 DAG.getConstant(1, MVT::i32)); 2774 2775 // If we are supposed to, toggle the bit. 2776 if (InvertBit) 2777 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags, 2778 DAG.getConstant(1, MVT::i32)); 2779 return Flags; 2780} 2781 2782static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2783 // Create a stack slot that is 16-byte aligned. 2784 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 2785 int FrameIdx = FrameInfo->CreateStackObject(16, 16); 2786 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2787 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 2788 2789 // Store the input value into Value#0 of the stack slot. 2790 SDOperand Store = DAG.getStore(DAG.getEntryNode(), 2791 Op.getOperand(0), FIdx, NULL, 0); 2792 // Load it out. 2793 return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0); 2794} 2795 2796static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) { 2797 if (Op.getValueType() == MVT::v4i32) { 2798 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2799 2800 SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG); 2801 SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt. 2802 2803 SDOperand RHSSwap = // = vrlw RHS, 16 2804 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG); 2805 2806 // Shrinkify inputs to v8i16. 2807 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS); 2808 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS); 2809 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap); 2810 2811 // Low parts multiplied together, generating 32-bit results (we ignore the 2812 // top parts). 2813 SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 2814 LHS, RHS, DAG, MVT::v4i32); 2815 2816 SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 2817 LHS, RHSSwap, Zero, DAG, MVT::v4i32); 2818 // Shift the high parts up 16 bits. 2819 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG); 2820 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd); 2821 } else if (Op.getValueType() == MVT::v8i16) { 2822 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2823 2824 SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG); 2825 2826 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 2827 LHS, RHS, Zero, DAG); 2828 } else if (Op.getValueType() == MVT::v16i8) { 2829 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2830 2831 // Multiply the even 8-bit parts, producing 16-bit sums. 2832 SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 2833 LHS, RHS, DAG, MVT::v8i16); 2834 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts); 2835 2836 // Multiply the odd 8-bit parts, producing 16-bit sums. 2837 SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 2838 LHS, RHS, DAG, MVT::v8i16); 2839 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts); 2840 2841 // Merge the results together. 2842 SDOperand Ops[16]; 2843 for (unsigned i = 0; i != 8; ++i) { 2844 Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8); 2845 Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8); 2846 } 2847 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts, 2848 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 2849 } else { 2850 assert(0 && "Unknown mul to lower!"); 2851 abort(); 2852 } 2853} 2854 2855/// LowerOperation - Provide custom lowering hooks for some operations. 
2856/// 2857SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 2858 switch (Op.getOpcode()) { 2859 default: assert(0 && "Wasn't expecting to be able to lower this!"); 2860 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 2861 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 2862 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 2863 case ISD::SETCC: return LowerSETCC(Op, DAG); 2864 case ISD::VASTART: 2865 return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, 2866 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); 2867 2868 case ISD::VAARG: 2869 return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, 2870 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); 2871 2872 case ISD::FORMAL_ARGUMENTS: 2873 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex, 2874 VarArgsStackOffset, VarArgsNumGPR, 2875 VarArgsNumFPR, PPCSubTarget); 2876 2877 case ISD::CALL: return LowerCALL(Op, DAG, PPCSubTarget); 2878 case ISD::RET: return LowerRET(Op, DAG, getTargetMachine()); 2879 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 2880 case ISD::DYNAMIC_STACKALLOC: 2881 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 2882 2883 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 2884 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 2885 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 2886 2887 // Lower 64-bit shifts. 2888 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 2889 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 2890 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 2891 2892 // Vector-related lowering. 2893 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 2894 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 2895 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 2896 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 2897 case ISD::MUL: return LowerMUL(Op, DAG); 2898 2899 // Frame & Return address. Currently unimplemented 2900 case ISD::RETURNADDR: break; 2901 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 2902 } 2903 return SDOperand(); 2904} 2905 2906//===----------------------------------------------------------------------===// 2907// Other Lowering Code 2908//===----------------------------------------------------------------------===// 2909 2910MachineBasicBlock * 2911PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 2912 MachineBasicBlock *BB) { 2913 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 2914 assert((MI->getOpcode() == PPC::SELECT_CC_I4 || 2915 MI->getOpcode() == PPC::SELECT_CC_I8 || 2916 MI->getOpcode() == PPC::SELECT_CC_F4 || 2917 MI->getOpcode() == PPC::SELECT_CC_F8 || 2918 MI->getOpcode() == PPC::SELECT_CC_VRRC) && 2919 "Unexpected instr type to insert"); 2920 2921 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond 2922 // control-flow pattern. The incoming instruction knows the destination vreg 2923 // to set, the condition code register to branch on, the true/false values to 2924 // select between, and a branch opcode to use. 2925 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 2926 ilist<MachineBasicBlock>::iterator It = BB; 2927 ++It; 2928 2929 // thisMBB: 2930 // ... 2931 // TrueVal = ... 
  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  unsigned SelectPred = MI->getOperand(4).getImm();
  BuildMI(BB, TII->get(PPC::BCC))
    .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
       e = BB->succ_end(); i != e; ++i)
    sinkMBB->addSuccessor(*i);
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while (!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  delete MI;   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case PPCISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 << V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 >>u V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0 ||   //  0 >>s V -> 0.
          C->isAllOnesValue())    // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;

  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
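        // Illustrative example (added, not from the source): a round-trip
        // like (float)(long)x stays entirely in FPRs as fctidz+fcfid, plus
        // an frsp when the destination is f32, with no store/reload through
        // the stack.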
        if (N->getOperand(0).getValueType() == MVT::i64) {
          SDOperand Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.Val);
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
            DCI.AddToWorklist(Val.Val);
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32) {
      SDOperand Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.Val);
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.Val);

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.Val);
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).Val->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16)) {
      SDOperand BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32 bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);

      return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
                         N->getOperand(2), N->getOperand(3),
                         DAG.getValueType(N->getOperand(1).getValueType()));
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).Val) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
      SDOperand Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      std::vector<MVT::ValueType> VTs;
      VTs.push_back(MVT::i32);
      VTs.push_back(MVT::Other);
      SDOperand SV = DAG.getSrcValue(LD->getSrcValue(), LD->getSrcValueOffset());
      SDOperand Ops[] = {
        LD->getChain(),     // Chain
        LD->getBasePtr(),   // Ptr
        SV,                 // SrcValue
        DAG.getValueType(N->getValueType(0))  // VT
      };
      SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);

      // If this is an i16 load, insert the truncate.
      SDOperand ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result.  The result value is dead because the bswap is
      // dead.
      DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1));

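      // Illustrative note (added): a 32-bit load whose only value use is a
      // bswap becomes a single lwbrx here.  The first CombineTo redirects the
      // bswap's users to the lwbrx value; the second rewires the old load's
      // chain users to the lwbrx chain.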
      // Return N so it doesn't get rechecked!
      return SDOperand(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    //
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo nodes that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if ((*UI)->getOpcode() == PPCISD::VCMPo &&
            (*UI)->getOperand(1) == N->getOperand(1) &&
            (*UI)->getOperand(2) == N->getOperand(2) &&
            (*UI)->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if its flag value is unused, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFCR instruction, we know this is safe.  Otherwise we
      // give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFCR: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)   // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

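      // Restating the XOR above (added note, assuming Val is 0 or 1):
      // "pred == 1" and "pred != 0" both branch when the predicate bit is
      // set; "pred == 0" and "pred != 1" branch when it is clear.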
      // Create the PPCISD altivec 'dot' comparison node.
      std::vector<MVT::ValueType> VTs;
      SDOperand Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
      VTs.push_back(LHS.getOperand(2).getValueType());
      VTs.push_back(MVT::Flag);
      SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  KnownZero = 0;
  KnownOne = 0;
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

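// Added note: the known-zero fact for the *_p predicates above follows from
// LowerINTRINSIC_WO_CHAIN, which reduces each predicate intrinsic to a single
// CR6 bit shifted into bit 0, so every other bit of the result is zero.
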
/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, PPC::G8RCRegisterClass);
      return std::make_pair(0U, PPC::GPRCRegisterClass);
    case 'f':
      if (VT == MVT::f32)
        return std::make_pair(0U, PPC::F4RCRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, PPC::F8RCRegisterClass);
      break;
    case 'v':
      return std::make_pair(0U, PPC::VRRCRegisterClass);
    case 'y':   // crrc
      return std::make_pair(0U, PPC::CRRCRegisterClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}


// isOperandValidForConstraint
SDOperand PPCTargetLowering::
isOperandValidForConstraint(SDOperand Op, char Letter, SelectionDAG &DAG) {
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    if (!isa<ConstantSDNode>(Op)) return SDOperand(0,0);  // Must be an immediate.
    unsigned Value = cast<ConstantSDNode>(Op)->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value) return Op;
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0) return Op;
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0) return Op;
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31) return Op;
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value)) return Op;
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0) return Op;
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value) return Op;
      break;
    }
    break;
  }
  }

  // Handle standard constraint letters.
  return TargetLowering::isOperandValidForConstraint(Op, Letter, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs < -(1LL << 15) || AM.BaseOffs > (1LL << 15)-1)
    return false;

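  // For instance (added example), lwz r3, 32760(r4) encodes directly, while
  // an offset of 40000 does not fit the signed 16-bit D field and must be
  // materialized separately (e.g. with addis/addi) first.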
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC supports only r, r+i, and r+r addressing.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)   // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)   // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,
                                                const Type *Ty) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V >= -(1 << 15) && V <= (1 << 15)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return false;
}

SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
              && MFI->getStackSize();

  if (isPPC64)
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::X31 : PPC::X1,
                              MVT::i64);
  else
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::R31 : PPC::R1,
                              MVT::i32);
}