// PPCISelLowering.cpp revision ddb739e5ea6ccf6fa4f4e2a23e3da550868efaa1
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

/// PPCTargetLowering - Describe, for the generic SelectionDAG legalizer, how
/// each operation/value-type pair should be handled on PowerPC: which types
/// live in which register classes, which nodes are natively Legal, which must
/// be Expand-ed by generic code, and which get Custom lowering in
/// LowerOperation below.  The order of calls here is not semantically
/// significant, but each (opcode, VT) entry is.
PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {

  // Fold away setcc operations if possible.
  setSetCCIsExpensive();
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmpLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // FP constants are materialized from the constant pool, not immediates.
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations.
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // We don't support sin/cos/sqrt/fmod.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // Only use the hardware fsqrt instruction when the subtarget has it;
  // otherwise FSQRT must be expanded (to a libcall).
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);

  // PowerPC does not have Select.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  // NOTE(review): f32<->i32 bitcasts are expanded here — presumably there is
  // no direct GPR<->FPR move on these subtargets; confirm.
  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);

  // PowerPC does not have truncstore for i1.
  setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // RET must be custom lowered, to meet ABI requirements.
  setOperationAction(ISD::RET , MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  // Use the default implementation for the remaining vararg/stack operations.
  setOperationAction(ISD::VAARG , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
    // 64-bit subtargets also have instructions for converting between i64
    // and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().has64BitRegs()) {
    // 64 bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32 bit PowerPC wants to expand i64 shifts itself (see the SHL/SRL/SRA
    // cases in LowerOperation).
    setOperationAction(ISD::SHL, MVT::i64, Custom);
    setOperationAction(ISD::SRL, MVT::i64, Custom);
    setOperationAction(ISD::SRA, MVT::i64, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand.  Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub/and/or/xor are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::AND , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::OR  , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::XOR , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8 (byte-level shuffles).
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);

      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // All four 128-bit AltiVec types share the VRRC register class.
    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setSetCCResultContents(ZeroOrOneSetCCResult);
  setStackPointerRegisterToSaveRestore(PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);

  computeRegisterProperties();
}

/// getTargetNodeName - Return a readable name for the given PPC-specific DAG
/// node opcode, or null for opcodes this target does not define (used for
/// DAG dumps/debugging).
const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  }
}

/// isFloatingPointZero - Return true if this is 0.0 or -0.0, either as an
/// FP constant node or as a load of such a constant from the constant pool.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
    // Maybe this has already been legalized into the constant pool?
    // Operand 1 of a load is its address; see if it is a constant-pool entry
    // holding an FP zero.
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.  (UNDEF shuffle
/// elements act as wildcards.)
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
279bool PPC::isVPKUHUMShuffleMask(SDNode *N) { 280 return isConstantOrUndef(N->getOperand( 0), 1) && 281 isConstantOrUndef(N->getOperand( 1), 3) && 282 isConstantOrUndef(N->getOperand( 2), 5) && 283 isConstantOrUndef(N->getOperand( 3), 7) && 284 isConstantOrUndef(N->getOperand( 4), 9) && 285 isConstantOrUndef(N->getOperand( 5), 11) && 286 isConstantOrUndef(N->getOperand( 6), 13) && 287 isConstantOrUndef(N->getOperand( 7), 15) && 288 isConstantOrUndef(N->getOperand( 8), 17) && 289 isConstantOrUndef(N->getOperand( 9), 19) && 290 isConstantOrUndef(N->getOperand(10), 21) && 291 isConstantOrUndef(N->getOperand(11), 23) && 292 isConstantOrUndef(N->getOperand(12), 25) && 293 isConstantOrUndef(N->getOperand(13), 27) && 294 isConstantOrUndef(N->getOperand(14), 29) && 295 isConstantOrUndef(N->getOperand(15), 31); 296} 297 298/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 299/// VPKUWUM instruction. 300bool PPC::isVPKUWUMShuffleMask(SDNode *N) { 301 return isConstantOrUndef(N->getOperand( 0), 2) && 302 isConstantOrUndef(N->getOperand( 1), 3) && 303 isConstantOrUndef(N->getOperand( 2), 6) && 304 isConstantOrUndef(N->getOperand( 3), 7) && 305 isConstantOrUndef(N->getOperand( 4), 10) && 306 isConstantOrUndef(N->getOperand( 5), 11) && 307 isConstantOrUndef(N->getOperand( 6), 14) && 308 isConstantOrUndef(N->getOperand( 7), 15) && 309 isConstantOrUndef(N->getOperand( 8), 18) && 310 isConstantOrUndef(N->getOperand( 9), 19) && 311 isConstantOrUndef(N->getOperand(10), 22) && 312 isConstantOrUndef(N->getOperand(11), 23) && 313 isConstantOrUndef(N->getOperand(12), 26) && 314 isConstantOrUndef(N->getOperand(13), 27) && 315 isConstantOrUndef(N->getOperand(14), 30) && 316 isConstantOrUndef(N->getOperand(15), 31); 317} 318 319 320 321/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 322/// specifies a splat of a single element that is suitable for input to 323/// VSPLTB/VSPLTH/VSPLTW. 
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  // The shuffle mask is itself a BUILD_VECTOR of 16 byte indices; EltSize is
  // the element width in bytes (1, 2, or 4 for vspltb/vsplth/vspltw).
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  // Indices >= 16 would reference the second input vector, which vsplt*
  // cannot do.
  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that the first EltSize bytes are consecutive (one whole element).
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  // Every subsequent element-sized group of bytes must repeat the first one.
  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask: the index of the splatted
/// element, in EltSize-byte units.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}

/// isVecSplatImm - Return true if this is a build_vector of constants which
/// can be formed by using a vspltis[bhw] instruction.  The ByteSize field
/// indicates the number of bytes of each element [124] -> [bhw].  If Val is
/// non-null, the (sign-extended 5-bit) splat immediate is stored through it.
bool PPC::isVecSplatImm(SDNode *N, unsigned ByteSize, char *Val) {
  SDOperand OpVal(0, 0);
  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return false;
  }

  if (OpVal.Val == 0) return false;  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return false;

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return false;
  }

  // Properly sign extend the value from its low ByteSize bytes.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return false;

  if (Val) *Val = MaskVal;

  // Finally, if this value fits in a 5 bit sext field, return true.
  return ((MaskVal << (32-5)) >> (32-5)) == MaskVal;
}


/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::FP_TO_SINT: {
    assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
    SDOperand Src = Op.getOperand(0);
    if (Src.getValueType() == MVT::f32)
      Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);

    SDOperand Tmp;
    switch (Op.getValueType()) {
    default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
    case MVT::i32:
      Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
      break;
    case MVT::i64:
      Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
      break;
    }

    // Convert the FP value to an int value through memory.
    SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
    if (Op.getValueType() == MVT::i32)
      Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
    return Bits;
  }
  case ISD::SINT_TO_FP:
    if (Op.getOperand(0).getValueType() == MVT::i64) {
      SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
      SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
      if (Op.getValueType() == MVT::f32)
        FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
      return FP;
    } else {
      assert(Op.getOperand(0).getValueType() == MVT::i32 &&
             "Unhandled SINT_TO_FP type in custom expander!");
      // Since we only generate this in 64-bit mode, we can take advantage of
      // 64-bit registers.  In particular, sign extend the input value into the
      // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
      // then lfd it and fcfid it.
467 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 468 int FrameIdx = FrameInfo->CreateStackObject(8, 8); 469 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32); 470 471 SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, 472 Op.getOperand(0)); 473 474 // STD the extended value into the stack slot. 475 SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other, 476 DAG.getEntryNode(), Ext64, FIdx, 477 DAG.getSrcValue(NULL)); 478 // Load the value as a double. 479 SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL)); 480 481 // FCFID it and return it. 482 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); 483 if (Op.getValueType() == MVT::f32) 484 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); 485 return FP; 486 } 487 break; 488 489 case ISD::SELECT_CC: { 490 // Turn FP only select_cc's into fsel instructions. 491 if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) || 492 !MVT::isFloatingPoint(Op.getOperand(2).getValueType())) 493 break; 494 495 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 496 497 // Cannot handle SETEQ/SETNE. 498 if (CC == ISD::SETEQ || CC == ISD::SETNE) break; 499 500 MVT::ValueType ResVT = Op.getValueType(); 501 MVT::ValueType CmpVT = Op.getOperand(0).getValueType(); 502 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 503 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3); 504 505 // If the RHS of the comparison is a 0.0, we don't need to do the 506 // subtraction at all. 507 if (isFloatingPointZero(RHS)) 508 switch (CC) { 509 default: break; // SETUO etc aren't handled by fsel. 
510 case ISD::SETULT: 511 case ISD::SETLT: 512 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 513 case ISD::SETUGE: 514 case ISD::SETGE: 515 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 516 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 517 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV); 518 case ISD::SETUGT: 519 case ISD::SETGT: 520 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 521 case ISD::SETULE: 522 case ISD::SETLE: 523 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 524 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 525 return DAG.getNode(PPCISD::FSEL, ResVT, 526 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV); 527 } 528 529 SDOperand Cmp; 530 switch (CC) { 531 default: break; // SETUO etc aren't handled by fsel. 532 case ISD::SETULT: 533 case ISD::SETLT: 534 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 535 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 536 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 537 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 538 case ISD::SETUGE: 539 case ISD::SETGE: 540 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 541 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 542 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 543 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 544 case ISD::SETUGT: 545 case ISD::SETGT: 546 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 547 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 548 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 549 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 550 case ISD::SETULE: 551 case ISD::SETLE: 552 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 553 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 554 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 555 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 556 } 557 break; 558 } 559 case ISD::SHL: { 560 
assert(Op.getValueType() == MVT::i64 && 561 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!"); 562 // The generic code does a fine job expanding shift by a constant. 563 if (isa<ConstantSDNode>(Op.getOperand(1))) break; 564 565 // Otherwise, expand into a bunch of logical ops. Note that these ops 566 // depend on the PPC behavior for oversized shift amounts. 567 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), 568 DAG.getConstant(0, MVT::i32)); 569 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), 570 DAG.getConstant(1, MVT::i32)); 571 SDOperand Amt = Op.getOperand(1); 572 573 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 574 DAG.getConstant(32, MVT::i32), Amt); 575 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt); 576 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1); 577 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 578 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 579 DAG.getConstant(-32U, MVT::i32)); 580 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5); 581 SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); 582 SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt); 583 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi); 584 } 585 case ISD::SRL: { 586 assert(Op.getValueType() == MVT::i64 && 587 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!"); 588 // The generic code does a fine job expanding shift by a constant. 589 if (isa<ConstantSDNode>(Op.getOperand(1))) break; 590 591 // Otherwise, expand into a bunch of logical ops. Note that these ops 592 // depend on the PPC behavior for oversized shift amounts. 
593 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), 594 DAG.getConstant(0, MVT::i32)); 595 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), 596 DAG.getConstant(1, MVT::i32)); 597 SDOperand Amt = Op.getOperand(1); 598 599 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 600 DAG.getConstant(32, MVT::i32), Amt); 601 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); 602 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); 603 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 604 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 605 DAG.getConstant(-32U, MVT::i32)); 606 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5); 607 SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); 608 SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt); 609 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi); 610 } 611 case ISD::SRA: { 612 assert(Op.getValueType() == MVT::i64 && 613 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!"); 614 // The generic code does a fine job expanding shift by a constant. 615 if (isa<ConstantSDNode>(Op.getOperand(1))) break; 616 617 // Otherwise, expand into a bunch of logical ops, followed by a select_cc. 
618 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), 619 DAG.getConstant(0, MVT::i32)); 620 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0), 621 DAG.getConstant(1, MVT::i32)); 622 SDOperand Amt = Op.getOperand(1); 623 624 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 625 DAG.getConstant(32, MVT::i32), Amt); 626 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); 627 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); 628 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 629 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 630 DAG.getConstant(-32U, MVT::i32)); 631 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5); 632 SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt); 633 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32), 634 Tmp4, Tmp6, ISD::SETLE); 635 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi); 636 } 637 case ISD::ConstantPool: { 638 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 639 Constant *C = CP->get(); 640 SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i32, CP->getAlignment()); 641 SDOperand Zero = DAG.getConstant(0, MVT::i32); 642 643 if (getTargetMachine().getRelocationModel() == Reloc::Static) { 644 // Generate non-pic code that has direct accesses to the constant pool. 645 // The address of the global is just (hi(&g)+lo(&g)). 646 SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero); 647 SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero); 648 return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo); 649 } 650 651 // Only lower ConstantPool on Darwin. 652 if (!getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) break; 653 SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero); 654 if (getTargetMachine().getRelocationModel() == Reloc::PIC) { 655 // With PIC, the first instruction is actually "GR+hi(&G)". 
656 Hi = DAG.getNode(ISD::ADD, MVT::i32, 657 DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi); 658 } 659 660 SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero); 661 Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo); 662 return Lo; 663 } 664 case ISD::GlobalAddress: { 665 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 666 GlobalValue *GV = GSDN->getGlobal(); 667 SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32, GSDN->getOffset()); 668 SDOperand Zero = DAG.getConstant(0, MVT::i32); 669 670 if (getTargetMachine().getRelocationModel() == Reloc::Static) { 671 // Generate non-pic code that has direct accesses to globals. 672 // The address of the global is just (hi(&g)+lo(&g)). 673 SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero); 674 SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero); 675 return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo); 676 } 677 678 // Only lower GlobalAddress on Darwin. 679 if (!getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) break; 680 681 SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero); 682 if (getTargetMachine().getRelocationModel() == Reloc::PIC) { 683 // With PIC, the first instruction is actually "GR+hi(&G)". 684 Hi = DAG.getNode(ISD::ADD, MVT::i32, 685 DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi); 686 } 687 688 SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero); 689 Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo); 690 691 if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() && 692 (!GV->isExternal() || GV->hasNotBeenReadFromBytecode())) 693 return Lo; 694 695 // If the global is weak or external, we have to go through the lazy 696 // resolution stub. 
697 return DAG.getLoad(MVT::i32, DAG.getEntryNode(), Lo, DAG.getSrcValue(0)); 698 } 699 case ISD::SETCC: { 700 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 701 702 // If we're comparing for equality to zero, expose the fact that this is 703 // implented as a ctlz/srl pair on ppc, so that the dag combiner can 704 // fold the new nodes. 705 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 706 if (C->isNullValue() && CC == ISD::SETEQ) { 707 MVT::ValueType VT = Op.getOperand(0).getValueType(); 708 SDOperand Zext = Op.getOperand(0); 709 if (VT < MVT::i32) { 710 VT = MVT::i32; 711 Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0)); 712 } 713 unsigned Log2b = Log2_32(MVT::getSizeInBits(VT)); 714 SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext); 715 SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz, 716 DAG.getConstant(Log2b, getShiftAmountTy())); 717 return DAG.getNode(ISD::TRUNCATE, getSetCCResultTy(), Scc); 718 } 719 // Leave comparisons against 0 and -1 alone for now, since they're usually 720 // optimized. FIXME: revisit this when we can custom lower all setcc 721 // optimizations. 722 if (C->isAllOnesValue() || C->isNullValue()) 723 break; 724 } 725 726 // If we have an integer seteq/setne, turn it into a compare against zero 727 // by subtracting the rhs from the lhs, which is faster than setting a 728 // condition register, reading it back out, and masking the correct bit. 729 MVT::ValueType LHSVT = Op.getOperand(0).getValueType(); 730 if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 731 MVT::ValueType VT = Op.getValueType(); 732 SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0), 733 Op.getOperand(1)); 734 return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC); 735 } 736 break; 737 } 738 case ISD::VASTART: { 739 // vastart just stores the address of the VarArgsFrameIndex slot into the 740 // memory location argument. 
741 // FIXME: Replace MVT::i32 with PointerTy 742 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32); 743 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR, 744 Op.getOperand(1), Op.getOperand(2)); 745 } 746 case ISD::RET: { 747 SDOperand Copy; 748 749 switch(Op.getNumOperands()) { 750 default: 751 assert(0 && "Do not know how to return this many arguments!"); 752 abort(); 753 case 1: 754 return SDOperand(); // ret void is legal 755 case 2: { 756 MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); 757 unsigned ArgReg = MVT::isInteger(ArgVT) ? PPC::R3 : PPC::F1; 758 Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1), 759 SDOperand()); 760 break; 761 } 762 case 3: 763 Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(2), 764 SDOperand()); 765 Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1)); 766 break; 767 } 768 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1)); 769 } 770 case ISD::SCALAR_TO_VECTOR: { 771 // Create a stack slot that is 16-byte aligned. 772 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 773 int FrameIdx = FrameInfo->CreateStackObject(16, 16); 774 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32); 775 776 // Store the input value into Value#0 of the stack slot. 777 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(), 778 Op.getOperand(0), FIdx,DAG.getSrcValue(NULL)); 779 // Load it out. 780 return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL)); 781 } 782 case ISD::BUILD_VECTOR: 783 // If this is a case we can't handle, return null and let the default 784 // expansion code take care of it. If we CAN select this case, return Op. 785 786 // See if this is all zeros. 787 // FIXME: We should handle splat(-0.0), and other cases here. 
788 if (ISD::isBuildVectorAllZeros(Op.Val)) 789 return Op; 790 791 if (PPC::isVecSplatImm(Op.Val, 1) || // vspltisb 792 PPC::isVecSplatImm(Op.Val, 2) || // vspltish 793 PPC::isVecSplatImm(Op.Val, 4)) // vspltisw 794 return Op; 795 796 return SDOperand(); 797 798 case ISD::VECTOR_SHUFFLE: { 799 SDOperand V1 = Op.getOperand(0); 800 SDOperand V2 = Op.getOperand(1); 801 SDOperand PermMask = Op.getOperand(2); 802 803 // Cases that are handled by instructions that take permute immediates 804 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 805 // selected by the instruction selector. 806 if (V2.getOpcode() == ISD::UNDEF && 807 (PPC::isSplatShuffleMask(PermMask.Val, 1) || 808 PPC::isSplatShuffleMask(PermMask.Val, 2) || 809 PPC::isSplatShuffleMask(PermMask.Val, 4))) 810 return Op; 811 812 if (PPC::isVPKUWUMShuffleMask(PermMask.Val) || 813 PPC::isVPKUHUMShuffleMask(PermMask.Val)) 814 return Op; 815 816 // TODO: Handle more cases, and also handle cases that are cheaper to do as 817 // multiple such instructions than as a constant pool load/vperm pair. 818 819 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 820 // vector that will get spilled to the constant pool. 821 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 822 823 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 824 // that it is in input element units, not in bytes. Convert now. 
825 MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType()); 826 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; 827 828 std::vector<SDOperand> ResultMask; 829 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { 830 unsigned SrcElt =cast<ConstantSDNode>(PermMask.getOperand(i))->getValue(); 831 832 for (unsigned j = 0; j != BytesPerElement; ++j) 833 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 834 MVT::i8)); 835 } 836 837 SDOperand VPermMask =DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask); 838 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); 839 } 840 case ISD::INTRINSIC_WO_CHAIN: { 841 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); 842 843 // If this is a lowered altivec predicate compare, CompareOpc is set to the 844 // opcode number of the comparison. 845 int CompareOpc = -1; 846 bool isDot = false; 847 switch (IntNo) { 848 default: return SDOperand(); // Don't custom lower most intrinsics. 849 // Comparison predicates. 
850 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 851 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 852 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 853 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 854 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 855 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 856 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 857 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 858 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 859 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 860 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 861 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 862 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 863 864 // Normal Comparisons. 
865 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 866 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 867 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 868 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 869 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 870 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 871 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 872 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 873 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 874 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 875 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 876 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 877 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 878 } 879 880 assert(CompareOpc>0 && "We only lower altivec predicate compares so far!"); 881 882 // If this is a non-dot comparison, make the VCMP node. 883 if (!isDot) 884 return DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), 885 Op.getOperand(1), Op.getOperand(2), 886 DAG.getConstant(CompareOpc, MVT::i32)); 887 888 // Create the PPCISD altivec 'dot' comparison node. 889 std::vector<SDOperand> Ops; 890 std::vector<MVT::ValueType> VTs; 891 Ops.push_back(Op.getOperand(2)); // LHS 892 Ops.push_back(Op.getOperand(3)); // RHS 893 Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32)); 894 VTs.push_back(Op.getOperand(2).getValueType()); 895 VTs.push_back(MVT::Flag); 896 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops); 897 898 // Now that we have the comparison, emit a copy from the CR to a GPR. 899 // This is flagged to the above dot comparison. 
900 SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, 901 DAG.getRegister(PPC::CR6, MVT::i32), 902 CompNode.getValue(1)); 903 904 // Unpack the result based on how the target uses it. 905 unsigned BitNo; // Bit # of CR6. 906 bool InvertBit; // Invert result? 907 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) { 908 default: // Can't happen, don't crash on invalid number though. 909 case 0: // Return the value of the EQ bit of CR6. 910 BitNo = 0; InvertBit = false; 911 break; 912 case 1: // Return the inverted value of the EQ bit of CR6. 913 BitNo = 0; InvertBit = true; 914 break; 915 case 2: // Return the value of the LT bit of CR6. 916 BitNo = 2; InvertBit = false; 917 break; 918 case 3: // Return the inverted value of the LT bit of CR6. 919 BitNo = 2; InvertBit = true; 920 break; 921 } 922 923 // Shift the bit into the low position. 924 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags, 925 DAG.getConstant(8-(3-BitNo), MVT::i32)); 926 // Isolate the bit. 927 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags, 928 DAG.getConstant(1, MVT::i32)); 929 930 // If we are supposed to, toggle the bit. 
    if (InvertBit)
      Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
                          DAG.getConstant(1, MVT::i32));
    return Flags;
  }
  }
  return SDOperand();
}

/// LowerArguments - Lower the incoming (formal) arguments of a function.
/// Arguments live at stack offset 24 and up (after the 24-byte linkage area),
/// but the first ones are normally passed in registers: up to 8 GPRs
/// (R3-R10) and up to 13 FPRs (F1-F13).  Every 4 bytes of argument space
/// consumes one GPR, so an f64 passed in an FPR also shadows two GPRs.
/// Returns one SDOperand per formal argument.
std::vector<SDOperand>
PPCTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineBasicBlock& BB = MF.front();
  SSARegMap *RegMap = MF.getSSARegMap();
  std::vector<SDOperand> ArgValues;

  // Offset of the next stack-passed argument; starts just past the 24-byte
  // linkage area.
  unsigned ArgOffset = 24;
  unsigned GPR_remaining = 8;
  unsigned FPR_remaining = 13;
  unsigned GPR_idx = 0, FPR_idx = 0;
  static const unsigned GPR[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
  };

  // Add DAG nodes to load the arguments...  On entry to a function on PPC,
  // the arguments start at offset 24, although they are likely to be passed
  // in registers.
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    SDOperand newroot, argt;
    unsigned ObjSize;
    bool needsLoad = false;
    bool ArgLive = !I->use_empty();
    MVT::ValueType ObjectVT = getValueType(I->getType());

    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      ObjSize = 4;
      if (!ArgLive) break;
      if (GPR_remaining > 0) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
        if (ObjectVT != MVT::i32) {
          // Sub-word arguments arrive extended to 32 bits; record how they
          // were extended, then truncate back to the declared type.
          unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
                                                       : ISD::AssertZext;
          argt = DAG.getNode(AssertOp, MVT::i32, argt,
                             DAG.getValueType(ObjectVT));
          argt = DAG.getNode(ISD::TRUNCATE, ObjectVT, argt);
        }
      } else {
        needsLoad = true;
      }
      break;
    case MVT::i64:
      ObjSize = 8;
      if (!ArgLive) break;
      if (GPR_remaining > 0) {
        SDOperand argHi, argLo;
        unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        argHi = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
        // If we have two or more remaining argument registers, then both halves
        // of the i64 can be sourced from there.  Otherwise, the lower half will
        // have to come off the stack.  This can happen when an i64 is preceded
        // by 28 bytes of arguments.
        if (GPR_remaining > 1) {
          unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
          MF.addLiveIn(GPR[GPR_idx+1], VReg);
          argLo = DAG.getCopyFromReg(argHi, VReg, MVT::i32);
        } else {
          int FI = MFI->CreateFixedObject(4, ArgOffset+4);
          SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
          argLo = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
                              DAG.getSrcValue(NULL));
        }
        // Build the outgoing arg thingy
        argt = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, argLo, argHi);
        newroot = argLo;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::f32:
    case MVT::f64:
      ObjSize = (ObjectVT == MVT::f64) ? 8 : 4;
      if (!ArgLive) {
        // Dead FP argument: still consumes an FPR slot for ABI bookkeeping.
        if (FPR_remaining > 0) {
          --FPR_remaining;
          ++FPR_idx;
        }
        break;
      }
      if (FPR_remaining > 0) {
        unsigned VReg;
        if (ObjectVT == MVT::f32)
          VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
        else
          VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
        MF.addLiveIn(FPR[FPR_idx], VReg);
        argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, ObjectVT);
        --FPR_remaining;
        ++FPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined above
    // that we ran out of physical registers of the appropriate type
    if (needsLoad) {
      // Sub-word arguments occupy the high-address end of their 4-byte slot
      // (big-endian), so bias the address to the actual bytes.
      unsigned SubregOffset = 0;
      if (ObjectVT == MVT::i8 || ObjectVT == MVT::i1) SubregOffset = 3;
      if (ObjectVT == MVT::i16) SubregOffset = 2;
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
      FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN,
                        DAG.getConstant(SubregOffset, MVT::i32));
      argt = newroot = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                                   DAG.getSrcValue(NULL));
    }

    // Every 4 bytes of argument space consumes one of the GPRs available for
    // argument passing.
    if (GPR_remaining > 0) {
      unsigned delta = (GPR_remaining > 1 && ObjSize == 8) ? 2 : 1;
      GPR_remaining -= delta;
      GPR_idx += delta;
    }
    ArgOffset += ObjSize;
    if (newroot.Val)
      DAG.setRoot(newroot.getValue(1));

    ArgValues.push_back(argt);
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (F.isVarArg()) {
    VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
    SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    std::vector<SDOperand> MemOps;
    for (; GPR_remaining > 0; --GPR_remaining, ++GPR_idx) {
      unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
      MF.addLiveIn(GPR[GPR_idx], VReg);
      SDOperand Val = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
      SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
                                    Val, FIN, DAG.getSrcValue(NULL));
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store
      SDOperand PtrOff = DAG.getConstant(4, getPointerTy());
      FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, PtrOff);
    }
    if (!MemOps.empty()) {
      // Glue the register spills into the chain so they are not dropped.
      MemOps.push_back(DAG.getRoot());
      DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps));
    }
  }

  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(PPC::R3);
    break;
  case MVT::i64:
    MF.addLiveOut(PPC::R3);
    MF.addLiveOut(PPC::R4);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(PPC::F1);
    break;
  }

  return ArgValues;
}

/// LowerCallTo - Lower an outgoing call: allocate arguments to GPRs/FPRs or
/// stack slots (mirroring the convention in LowerArguments), emit the
/// CALLSEQ_START/PPCISD::CALL/CALLSEQ_END sequence, and return the
/// (result value, chain) pair.
std::pair<SDOperand, SDOperand>
PPCTargetLowering::LowerCallTo(SDOperand Chain,
                               const Type *RetTy, bool isVarArg,
                               unsigned CallingConv, bool isTailCall,
                               SDOperand Callee, ArgListTy &Args,
                               SelectionDAG &DAG) {
  // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
  // SelectExpr to use to put the arguments in the appropriate registers.
  std::vector<SDOperand> args_to_use;

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.
  unsigned NumBytes = 24;

  if (Args.empty()) {
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getConstant(NumBytes, getPointerTy()));
  } else {
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unknown value type!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32:
        NumBytes += 4;
        break;
      case MVT::i64:
      case MVT::f64:
        NumBytes += 8;
        break;
      }
    }

    // Just to be safe, we'll always reserve the full 24 bytes of linkage area
    // plus 32 bytes of argument space in case any called code gets funky on us.
    // (Required by ABI to support var arg)
    if (NumBytes < 56) NumBytes = 56;

    // Adjust the stack pointer for the new arguments...
    // These operations are automatically eliminated by the prolog/epilog pass
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getConstant(NumBytes, getPointerTy()));

    // Set up a copy of the stack pointer for use loading and storing any
    // arguments that may not fit in the registers available for argument
    // passing.
    SDOperand StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

    // Figure out which arguments are going to go in registers, and which in
    // memory.  Also, if this is a vararg function, floating point operations
    // must be stored to our stack, and loaded into integer regs as well, if
    // any integer regs are available for argument passing.
    unsigned ArgOffset = 24;
    unsigned GPR_remaining = 8;
    unsigned FPR_remaining = 13;

    std::vector<SDOperand> MemOps;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      // PtrOff will be used to store the current argument to the stack if a
      // register cannot be found for it.
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      MVT::ValueType ArgVT = getValueType(Args[i].second);

      switch (ArgVT) {
      default: assert(0 && "Unexpected ValueType for argument!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
        // Promote the integer to 32 bits.  If the input type is signed use a
        // sign extend, otherwise use a zero extend.
        if (Args[i].second->isSigned())
          Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
        else
          Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
        // FALL THROUGH
      case MVT::i32:
        if (GPR_remaining > 0) {
          args_to_use.push_back(Args[i].first);
          --GPR_remaining;
        } else {
          MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Args[i].first, PtrOff,
                                       DAG.getSrcValue(NULL)));
        }
        ArgOffset += 4;
        break;
      case MVT::i64:
        // If we have one free GPR left, we can place the upper half of the i64
        // in it, and store the other half to the stack.  If we have two or more
        // free GPRs, then we can pass both halves of the i64 in registers.
        if (GPR_remaining > 0) {
          SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                     Args[i].first, DAG.getConstant(1, MVT::i32));
          SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                     Args[i].first, DAG.getConstant(0, MVT::i32));
          args_to_use.push_back(Hi);
          --GPR_remaining;
          if (GPR_remaining > 0) {
            args_to_use.push_back(Lo);
            --GPR_remaining;
          } else {
            // Low half goes to the second word of this argument's stack slot.
            SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
            PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
            MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                         Lo, PtrOff, DAG.getSrcValue(NULL)));
          }
        } else {
          MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Args[i].first, PtrOff,
                                       DAG.getSrcValue(NULL)));
        }
        ArgOffset += 8;
        break;
      case MVT::f32:
      case MVT::f64:
        if (FPR_remaining > 0) {
          args_to_use.push_back(Args[i].first);
          --FPR_remaining;
          if (isVarArg) {
            // Varargs: store the FP value, then reload it as integer word(s)
            // so the shadowing GPR(s) carry the same bits.
            SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                          Args[i].first, PtrOff,
                                          DAG.getSrcValue(NULL));
            MemOps.push_back(Store);
            // Float varargs are always shadowed in available integer registers
            if (GPR_remaining > 0) {
              SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
                                           DAG.getSrcValue(NULL));
              MemOps.push_back(Load.getValue(1));
              args_to_use.push_back(Load);
              --GPR_remaining;
            }
            if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
              SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
              PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
              SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
                                           DAG.getSrcValue(NULL));
              MemOps.push_back(Load.getValue(1));
              args_to_use.push_back(Load);
              --GPR_remaining;
            }
          } else {
            // If we have any FPRs remaining, we may also have GPRs remaining.
            // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
            // GPRs.
            if (GPR_remaining > 0) {
              args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
              --GPR_remaining;
            }
            if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
              args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
              --GPR_remaining;
            }
          }
        } else {
          MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Args[i].first, PtrOff,
                                       DAG.getSrcValue(NULL)));
        }
        ArgOffset += (ArgVT == MVT::f32) ? 4 : 8;
        break;
      }
    }
    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps);
  }

  // Build the list of result value types for the call node.
  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  MVT::ValueType ActualRetTyVT = RetTyVT;
  if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i16)
    ActualRetTyVT = MVT::i32;   // Promote result to i32.

  if (RetTyVT == MVT::i64) {
    // i64 results come back as two i32 halves.
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
  } else if (RetTyVT != MVT::isVoid) {
    RetVals.push_back(ActualRetTyVT);
  }
  RetVals.push_back(MVT::Other);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);

  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end());
  SDOperand TheCall = DAG.getNode(PPCISD::CALL, RetVals, Ops);
  // The chain is always the last result of the call node.
  Chain = TheCall.getValue(TheCall.Val->getNumValues()-1);
  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, getPointerTy()));
  SDOperand RetVal = TheCall;

  // If the result is a small value, add a note so that we keep track of the
  // information about whether it is sign or zero extended.
  if (RetTyVT != ActualRetTyVT) {
    RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext : ISD::AssertZext,
                         MVT::i32, RetVal, DAG.getValueType(RetTyVT));
    RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
  } else if (RetTyVT == MVT::i64) {
    // Reassemble the two i32 result halves into one i64 value.
    RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, RetVal, RetVal.getValue(1));
  }

  return std::make_pair(RetVal, Chain);
}

/// InsertAtEndOfBasicBlock - Expand a SELECT_CC pseudo instruction into the
/// classic branch diamond: a conditional branch over a "false value" block,
/// joined by a PHI in the sink block.  Returns the block the next instruction
/// should be emitted into.
MachineBasicBlock *
PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  assert((MI->getOpcode() == PPC::SELECT_CC_Int ||
          MI->getOpcode() == PPC::SELECT_CC_F4 ||
          MI->getOpcode() == PPC::SELECT_CC_F8) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  ilist<MachineBasicBlock>::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  // Operand 4 carries the branch opcode to use; operand 1 the CR to branch on.
  BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
      e = BB->succ_end(); i != e; ++i)
    sinkMBB->addSuccessor(*i);
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while(!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  delete MI;   // The pseudo instruction is gone now.
  return BB;
}

/// PerformDAGCombine - Target-specific DAG combines: fp->int->fp round trips
/// via fctidz/fcfid, FP-to-int stores via stfiwx, and CSE of VCMP against an
/// existing identical VCMPo.
SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
        if (N->getOperand(0).getValueType() == MVT::i64) {
          SDOperand Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.Val);
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
            DCI.AddToWorklist(Val.Val);
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32) {
      SDOperand Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.Val);
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.Val);

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.Val);
      return Val;
    }
    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    //
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if ((*UI)->getOpcode() == PPCISD::VCMPo &&
            (*UI)->getOperand(1) == N->getOperand(1) &&
            (*UI)->getOperand(2) == N->getOperand(2) &&
            (*UI)->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there are non-zero uses of the flag value, use the VCMPo node!
      if (VCMPoNode && !VCMPoNode->hasNUsesOfValue(0, 1))
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  }

  return SDOperand();
}

/// computeMaskedBitsForTargetNode - For the lowered altivec predicate-compare
/// intrinsics, the result is always 0 or 1, so every bit except the low one is
/// known zero.
void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  KnownZero = 0;
  KnownOne = 0;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}


/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
1525PPCTargetLowering::ConstraintType 1526PPCTargetLowering::getConstraintType(char ConstraintLetter) const { 1527 switch (ConstraintLetter) { 1528 default: break; 1529 case 'b': 1530 case 'r': 1531 case 'f': 1532 case 'v': 1533 case 'y': 1534 return C_RegisterClass; 1535 } 1536 return TargetLowering::getConstraintType(ConstraintLetter); 1537} 1538 1539 1540std::vector<unsigned> PPCTargetLowering:: 1541getRegClassForInlineAsmConstraint(const std::string &Constraint, 1542 MVT::ValueType VT) const { 1543 if (Constraint.size() == 1) { 1544 switch (Constraint[0]) { // GCC RS6000 Constraint Letters 1545 default: break; // Unknown constriant letter 1546 case 'b': 1547 return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 , 1548 PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 , 1549 PPC::R8 , PPC::R9 , PPC::R10, PPC::R11, 1550 PPC::R12, PPC::R13, PPC::R14, PPC::R15, 1551 PPC::R16, PPC::R17, PPC::R18, PPC::R19, 1552 PPC::R20, PPC::R21, PPC::R22, PPC::R23, 1553 PPC::R24, PPC::R25, PPC::R26, PPC::R27, 1554 PPC::R28, PPC::R29, PPC::R30, PPC::R31, 1555 0); 1556 case 'r': 1557 return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 , 1558 PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 , 1559 PPC::R8 , PPC::R9 , PPC::R10, PPC::R11, 1560 PPC::R12, PPC::R13, PPC::R14, PPC::R15, 1561 PPC::R16, PPC::R17, PPC::R18, PPC::R19, 1562 PPC::R20, PPC::R21, PPC::R22, PPC::R23, 1563 PPC::R24, PPC::R25, PPC::R26, PPC::R27, 1564 PPC::R28, PPC::R29, PPC::R30, PPC::R31, 1565 0); 1566 case 'f': 1567 return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 , 1568 PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 , 1569 PPC::F8 , PPC::F9 , PPC::F10, PPC::F11, 1570 PPC::F12, PPC::F13, PPC::F14, PPC::F15, 1571 PPC::F16, PPC::F17, PPC::F18, PPC::F19, 1572 PPC::F20, PPC::F21, PPC::F22, PPC::F23, 1573 PPC::F24, PPC::F25, PPC::F26, PPC::F27, 1574 PPC::F28, PPC::F29, PPC::F30, PPC::F31, 1575 0); 1576 case 'v': 1577 return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 , 1578 PPC::V4 , 
PPC::V5 , PPC::V6 , PPC::V7 , 1579 PPC::V8 , PPC::V9 , PPC::V10, PPC::V11, 1580 PPC::V12, PPC::V13, PPC::V14, PPC::V15, 1581 PPC::V16, PPC::V17, PPC::V18, PPC::V19, 1582 PPC::V20, PPC::V21, PPC::V22, PPC::V23, 1583 PPC::V24, PPC::V25, PPC::V26, PPC::V27, 1584 PPC::V28, PPC::V29, PPC::V30, PPC::V31, 1585 0); 1586 case 'y': 1587 return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3, 1588 PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7, 1589 0); 1590 } 1591 } 1592 1593 return std::vector<unsigned>(); 1594} 1595 1596// isOperandValidForConstraint 1597bool PPCTargetLowering:: 1598isOperandValidForConstraint(SDOperand Op, char Letter) { 1599 switch (Letter) { 1600 default: break; 1601 case 'I': 1602 case 'J': 1603 case 'K': 1604 case 'L': 1605 case 'M': 1606 case 'N': 1607 case 'O': 1608 case 'P': { 1609 if (!isa<ConstantSDNode>(Op)) return false; // Must be an immediate. 1610 unsigned Value = cast<ConstantSDNode>(Op)->getValue(); 1611 switch (Letter) { 1612 default: assert(0 && "Unknown constraint letter!"); 1613 case 'I': // "I" is a signed 16-bit constant. 1614 return (short)Value == (int)Value; 1615 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 1616 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 1617 return (short)Value == 0; 1618 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 1619 return (Value >> 16) == 0; 1620 case 'M': // "M" is a constant that is greater than 31. 1621 return Value > 31; 1622 case 'N': // "N" is a positive constant that is an exact power of two. 1623 return (int)Value > 0 && isPowerOf2_32(Value); 1624 case 'O': // "O" is the constant zero. 1625 return Value == 0; 1626 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 1627 return (short)-Value == (int)-Value; 1628 } 1629 break; 1630 } 1631 } 1632 1633 // Handle standard constraint letters. 
1634 return TargetLowering::isOperandValidForConstraint(Op, Letter); 1635} 1636 1637/// isLegalAddressImmediate - Return true if the integer value can be used 1638/// as the offset of the target addressing mode. 1639bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const { 1640 // PPC allows a sign-extended 16-bit immediate field. 1641 return (V > -(1 << 16) && V < (1 << 16)-1); 1642} 1643