PPCISelLowering.cpp revision 325f0a129e57ff5d1842edd0b4b7473a4d6b47f6
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {

  // Fold away setcc operations if possible.
  setSetCCIsExpensive();
  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmpLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations.
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);

  // Use hardware square root only if the subtarget supports FSQRT.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
  setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64 , Expand);

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32 , Expand);
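  // Illustrative note (an assumption about the generic expansion, not taken
  // from this file): expanding ROTR rewrites the rotate-right as a
  // rotate-left with a complemented amount, e.g. rotr(x, n) == rotl(x, 32-n),
  // which the PPC backend can then match with rlwnm/rlwinm.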
  // PowerPC does not have Select.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // PowerPC does not have truncstore for i1.
  setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // RET must be custom lowered, to meet ABI requirements.
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register
    // values, and we don't model the fact that the top part is clobbered by
    // calls.  We need to flag these together so that the value isn't live
    // across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL, MVT::i64, Custom);
    setOperationAction(ISD::SRL, MVT::i64, Custom);
    setOperationAction(ISD::SRA, MVT::i64, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand.  Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }
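    // Illustrative sketch (added; not in the original source): the Promote
    // entries above mean a bitwise op on, say, v16i8 is legalized by
    // bitcasting to v4i32, performing the v4i32 operation, and bitcasting
    // back.  This is safe because AND/OR/XOR, loads, stores, and selects
    // preserve the bit pattern regardless of the element type.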
    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND , MVT::v4i32, Legal);
    setOperationAction(ISD::OR , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setSetCCResultType(MVT::i32);
  setShiftAmountType(MVT::i32);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setStackPointerRegisterToSaveRestore(PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  computeRegisterProperties();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::MTCTR:         return "PPCISD::MTCTR";
  case PPCISD::BCTRL:         return "PPCISD::BCTRL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  case PPCISD::LBRX:          return "PPCISD::LBRX";
  case PPCISD::STBRX:         return "PPCISD::STBRX";
  case PPCISD::COND_BRANCH:   return "PPCISD::COND_BRANCH";
  }
}
//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units.
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit.
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}
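// Worked example (added for illustration; not in the original source): for
// the binary vmrglb case, isVMerge(N, 1, 8, 24) accepts exactly the byte mask
// {8,24, 9,25, 10,26, ..., 15,31}, i.e. the low halves of the two source
// vectors interleaved one byte at a time.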
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}
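// Worked example (added for illustration; not in the original source): a
// v8i16 splat of element 3 shows up here as the byte-level mask
// {6,7, 6,7, 6,7, ...}; isSplatShuffleMask(N, 2) verifies the repeating
// consecutive pair, and getVSPLTImmediate returns 6/2 == 3, the vsplth
// immediate.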
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();

      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)                                // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}
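// Worked example (added for illustration; not in the original source): asked
// for ByteSize == 1 on a v4i32 build_vector of 0x01010101, the loop above
// halves ValSizeInBytes from 4 to 2 to 1, each time checking that the two
// halves match (0x0101 == 0x0101, then 0x01 == 0x01).  The surviving byte
// 0x01 fits the 5-bit signed immediate field, so this becomes vspltisb(1).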
//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->get();
  SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}

static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the jump table.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}

static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);

  if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
      (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
}
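// Illustrative sketch of the code this produces (an assumption about the
// final assembly, not taken from this file): on 32-bit Darwin the hi/lo
// addressing of a global comes out roughly as
//   lis  r2, ha16(_g)        ; PPCISD::Hi
//   addi r3, r2, lo16(_g)    ; PPCISD::Lo folded into the add
// with the PIC form adding the global base register into the high half first.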
static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by subtracting the rhs from the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}

static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
                     Op.getOperand(1), Op.getOperand(2));
}
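// Sketch of the argument area the lowering below assumes (added as a reading
// aid; the TODO in the function asks for exactly this kind of description, so
// treat it as an informal reconstruction rather than authoritative ABI
// documentation): the caller reserves a linkage area of 24 bytes (48 on
// PPC64) laid out as [SP][CR][LR][3 x unused], and named arguments start
// right after it, which is why ArgOffset begins at 24.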
static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
                                       int &VarArgsFrameIndex) {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SSARegMap *RegMap = MF.getSSARegMap();
  std::vector<SDOperand> ArgValues;
  SDOperand Root = Op.getOperand(0);

  unsigned ArgOffset = 24;
  const unsigned Num_GPR_Regs = 8;
  const unsigned Num_FPR_Regs = 13;
  const unsigned Num_VR_Regs  = 12;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
  };
  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start at offset 24, although the
  // first ones are often in registers.
  for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
    SDOperand ArgVal;
    bool needsLoad = false;
    MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
    unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;

    unsigned CurArgOffset = ArgOffset;
    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i32:
      // All int arguments reserve stack space.
      ArgOffset += isPPC64 ? 8 : 4;

      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
        ++GPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::i64:  // PPC64
      // All int arguments reserve stack space.
      ArgOffset += 8;

      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        ++GPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // All FP arguments reserve stack space.
      ArgOffset += ObjSize;

      // Every 4 bytes of argument space consumes one of the GPRs available
      // for argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;
        if (ObjectVT == MVT::f32)
          VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
        else
          VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
        MF.addLiveIn(FPR[FPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass);
        MF.addLiveIn(VR[VR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
        ++VR_idx;
      } else {
        // This should be simple, but requires getting 16-byte aligned stack
        // values.
        assert(0 && "Loading VR argument not implemented yet!");
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      // If the argument is actually used, emit a load from the right stack
      // slot.
      if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
        int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset);
        SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
        ArgVal = DAG.getLoad(ObjectVT, Root, FIN,
                             DAG.getSrcValue(NULL));
      } else {
        // Don't emit a dead load.
        ArgVal = DAG.getNode(ISD::UNDEF, ObjectVT);
      }
    }

    ArgValues.push_back(ArgVal);
  }
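  // Worked example of the GPR-shadowing rule above (added for illustration;
  // the register assignments are my reading of this code, not quoted from
  // ABI docs): for f(int a, double b, int c) on 32-bit PPC, 'a' lands in R3,
  // 'b' goes to F1 while consuming the R4/R5 slots, and 'c' therefore
  // arrives in R6.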
  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (isVarArg) {
    VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
                                               ArgOffset);
    SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    std::vector<SDOperand> MemOps;
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
      MF.addLiveIn(GPR[GPR_idx], VReg);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
      SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
                                    Val, FIN, DAG.getSrcValue(NULL));
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
      FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps);
  }

  ArgValues.push_back(Root);

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
                                    Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVT, ArgValues);
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return 0;

  int Addr = C->getValue();
  if ((Addr & 3) != 0 ||             // Low 2 bits are implicitly zero.
      (Addr << 6 >> 6) != Addr)
    return 0;  // Top 6 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
}
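// Illustrative check (added; the concrete numbers are mine): the absolute
// branch target field is 24 bits, shifted left by 2 and sign extended, so an
// address is encodable iff its low 2 bits are zero and bits 31..26 are all
// copies of bit 25 -- which is exactly what (Addr << 6 >> 6) != Addr rejects.
// For example, 0x00400000 passes and is encoded as 0x00400000 >> 2, while
// 0x40000001 fails both tests.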
static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  bool isVarArg   = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps  = (Op.getNumOperands() - 5) / 2;

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
  // SelectExpr to use to put the arguments in the appropriate registers.
  std::vector<SDOperand> args_to_use;

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.  We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned NumBytes = 6*PtrByteSize;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i)
    NumBytes += MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it's varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed.  As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  if (NumBytes < 6*PtrByteSize+8*PtrByteSize)
    NumBytes = 6*PtrByteSize+8*PtrByteSize;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, PtrVT));

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDOperand StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = 6*PtrByteSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
  };
  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]);
  const unsigned NumFPRs = sizeof(FPR)/sizeof(FPR[0]);
  const unsigned NumVRs  = sizeof( VR)/sizeof( VR[0]);

  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;

  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
  std::vector<SDOperand> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
    PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);

    // On PPC64, promote integers to 64-bit values.
    if (isPPC64 && Arg.getValueType() == MVT::i32) {
      unsigned ExtOp = ISD::ZERO_EXTEND;
      if (cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue())
        ExtOp = ISD::SIGN_EXTEND;
      Arg = DAG.getNode(ExtOp, MVT::i64, Arg);
    }

    switch (Arg.getValueType()) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                          Arg, PtrOff, DAG.getSrcValue(NULL)));
      }
      ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

        if (isVarArg) {
          SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                        Arg, PtrOff,
                                        DAG.getSrcValue(NULL));
          MemOpChains.push_back(Store);

          // Float varargs are always shadowed in available integer registers.
          if (GPR_idx != NumGPRs) {
            SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff,
                                         DAG.getSrcValue(NULL));
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64) {
            SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType());
            PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
            SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff,
                                         DAG.getSrcValue(NULL));
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        } else {
          // If we have any FPRs remaining, we may also have GPRs remaining.
          // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
          // GPRs.
          if (GPR_idx != NumGPRs)
            ++GPR_idx;
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64)
            ++GPR_idx;
        }
      } else {
        MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                          Arg, PtrOff, DAG.getSrcValue(NULL)));
      }
      if (isPPC64)
        ArgOffset += 8;
      else
        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      assert(!isVarArg && "Don't support passing vectors to varargs yet!");
      assert(VR_idx != NumVRs &&
             "Don't support passing more than 12 vector args yet!");
      RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      break;
    }
  }
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }
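  // Note on the flag threading above (an explanatory comment added to the
  // original): each getCopyToReg consumes the flag produced by the previous
  // one, so the register copies are glued into one uninterruptible sequence
  // ending at the call node; without the glue the scheduler could separate
  // them and let an intervening node clobber an argument register.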
  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain.
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.

  std::vector<SDOperand> Ops;
  unsigned CallOpc = PPCISD::CALL;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
  else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
    // If this is an absolute destination address, use the munged value.
    Callee = SDOperand(Dest, 0);
  else {
    // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
    // to do the call; we can't use PPCISD::CALL.
    Ops.push_back(Chain);
    Ops.push_back(Callee);

    if (InFlag.Val)
      Ops.push_back(InFlag);
    Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, Ops);
    InFlag = Chain.getValue(1);

    // Copy the callee address into R12 on darwin.
    Chain = DAG.getCopyToReg(Chain, PPC::R12, Callee, InFlag);
    InFlag = Chain.getValue(1);

    NodeTys.clear();
    NodeTys.push_back(MVT::Other);
    NodeTys.push_back(MVT::Flag);
    Ops.clear();
    Ops.push_back(Chain);
    CallOpc = PPCISD::BCTRL;
    Callee.Val = 0;
  }

  // If this is a direct call, pass the chain and the callee.
  if (Callee.Val) {
    Ops.push_back(Chain);
    Ops.push_back(Callee);
  }

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);
  Chain = DAG.getNode(CallOpc, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  std::vector<SDOperand> ResultVals;
  NodeTys.clear();

  // If the call has results, copy the values out of the ret val registers.
  switch (Op.Val->getValueType(0)) {
  default: assert(0 && "Unexpected ret value!");
  case MVT::Other: break;
  case MVT::i32:
    if (Op.Val->getValueType(1) == MVT::i32) {
      Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      NodeTys.push_back(MVT::i32);
    } else {
      Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
    }
    NodeTys.push_back(MVT::i32);
    break;
  case MVT::i64:
    Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(MVT::i64);
    break;
  case MVT::f32:
  case MVT::f64:
    Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0),
                               InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(Op.Val->getValueType(0));
    break;
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
    Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0),
                               InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    NodeTys.push_back(Op.Val->getValueType(0));
    break;
  }

  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, PtrVT));
  NodeTys.push_back(MVT::Other);
  // If the function returns void, just return the chain.
  if (ResultVals.empty())
    return Chain;

  // Otherwise, merge everything together with a MERGE_VALUES node.
  ResultVals.push_back(Chain);
  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, ResultVals);
  return Res.getValue(Op.ResNo);
}

static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;
  switch (Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1:
    return SDOperand();   // ret void is legal
  case 3: {
    MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
    unsigned ArgReg;
    if (ArgVT == MVT::i32) {
      ArgReg = PPC::R3;
    } else if (ArgVT == MVT::i64) {
      ArgReg = PPC::X3;
    } else if (MVT::isVector(ArgVT)) {
      ArgReg = PPC::V2;
    } else {
      assert(MVT::isFloatingPoint(ArgVT));
      ArgReg = PPC::F1;
    }

    Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
                            SDOperand());

    // If we haven't noted that R3/F1 is live out, do so now.
    if (DAG.getMachineFunction().liveout_empty())
      DAG.getMachineFunction().addLiveOut(ArgReg);
    break;
  }
  case 5:
    Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(3),
                            SDOperand());
    Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1), Copy.getValue(1));
    // If we haven't noted that R3+R4 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(PPC::R3);
      DAG.getMachineFunction().addLiveOut(PPC::R4);
    }
    break;
  }
  return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
}
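// Background for the lowering below (added; the operand naming follows my
// reading of the PowerPC fsel definition rather than anything in this file):
// fsel computes "A >= 0.0 ? B : C", so select_cc(lhs, rhs, TV, FV, setge)
// maps directly onto fsel(lhs-rhs, TV, FV), and the other orderings are
// reached by swapping TV/FV and/or negating or reversing the subtraction.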
/// LowerSELECT_CC - Lower floating point select_cc's into the fsel instruction
/// when possible.
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
  // Not FP? Not a fsel.
  if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
      !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
    return SDOperand();

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  // Cannot handle SETEQ/SETNE.
  if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();

  MVT::ValueType ResVT = Op.getValueType();
  MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
  SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETULT:
    case ISD::SETOLT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
      // FALL THROUGH
    case ISD::SETUGE:
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits.
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETOGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
      // FALL THROUGH
    case ISD::SETULE:
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits.
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT,
                         DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
    }

  SDOperand Cmp;
  switch (CC) {
  default: break;         // SETUO etc aren't handled by fsel.
  case ISD::SETULT:
  case ISD::SETOLT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)     // Comparison is always 64-bits.
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)     // Comparison is always 64-bits.
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETOGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)     // Comparison is always 64-bits.
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETULE:
  case ISD::SETOLE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)     // Comparison is always 64-bits.
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  }
  return SDOperand();
}
static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
  SDOperand Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);

  SDOperand Tmp;
  switch (Op.getValueType()) {
  default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
    break;
  case MVT::i64:
    Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
    break;
  }

  // Convert the FP value to an int value through memory.
  SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
  if (Op.getValueType() == MVT::i32)
    Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
  return Bits;
}

static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
    SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
    if (Op.getValueType() == MVT::f32)
      FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
    return FP;
  }

  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled SINT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack,
  // then lfd it and fcfid it.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(8, 8);
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
                                Op.getOperand(0));

  // STD the extended value into the stack slot.
  SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
                                DAG.getEntryNode(), Ext64, FIdx,
                                DAG.getSrcValue(NULL));
  // Load the value as a double.
  SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL));

  // FCFID it and return it.
  SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
  if (Op.getValueType() == MVT::f32)
    FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
  return FP;
}
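// How the i64 shift expansion below works (added walkthrough; the boundary
// arithmetic is my own illustration): with {Hi,Lo} halves and an amount Amt,
// for Amt in [1,31] the high result of a SHL is (Hi << Amt) | (Lo >> (32-Amt)).
// For Amt in [32,63], Amt-32 is the residual shift, and the PPCISD::SHL/SRL
// nodes are defined to produce 0 for out-of-range 32-bit amounts (the
// "oversized shift" behavior the comments mention), so the unused term drops
// out and OutHi reduces to Lo << (Amt-32); e.g. Amt == 40 gives
// OutHi = Lo << 8 and OutLo = 0.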
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, PtrVT));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, PtrVT));
  SDOperand Amt = Op.getOperand(1);

  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
  SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}

static SDOperand LowerSRL(SDOperand Op, SelectionDAG &DAG,
                          MVT::ValueType PtrVT) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();

  // Otherwise, expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, PtrVT));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, PtrVT));
  SDOperand Amt = Op.getOperand(1);

  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
  SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}

static SDOperand LowerSRA(SDOperand Op, SelectionDAG &DAG,
                          MVT::ValueType PtrVT) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();

  // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
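  // Sketch: the high word is simply Hi sra Amt (sraw replicates the sign
  // bit for amounts in [32, 63]), while the low word needs a select because
  // the OR-based combine stops being valid once the shift crosses the word
  // boundary:
  //   Tmp4  = (Lo >> Amt) | (Hi << (32 - Amt));  // correct for Amt in [0, 32]
  //   Tmp6  = Hi sra (Amt - 32);                 // correct for Amt in [32, 63]
  //   OutLo = (Amt - 32 <= 0) ? Tmp4 : Tmp6;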
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, PtrVT));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, PtrVT));
  SDOperand Amt = Op.getOperand(1);

  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5);
  SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
  SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
                                    Tmp4, Tmp6, ISD::SETLE);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}

//===----------------------------------------------------------------------===//
// Vector related lowering.
//

// If this is a vector of constants or undefs, get the bits.  A bit in
// UndefBits is set if the corresponding element of the vector is an
// ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
// zero.  Return true if this is not an array of constants, false if it is.
//
static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
                                       uint64_t UndefBits[2]) {
  // Start with zero'd results.
  VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;

  unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType());
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    SDOperand OpVal = BV->getOperand(i);

    unsigned PartNo = i >= e/2;     // In the upper 64 bits of the vector?
    unsigned SlotNo = e/2 - (i & (e/2-1))-1;  // Which subpiece of the uint64_t.

    uint64_t EltBits = 0;
    if (OpVal.getOpcode() == ISD::UNDEF) {
      uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
      UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
      continue;
    } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
      EltBits = CN->getValue() & (~0U >> (32-EltBitSize));
    } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
      assert(CN->getValueType(0) == MVT::f32 &&
             "Only one legal FP vector type!");
      EltBits = FloatToBits(CN->getValue());
    } else {
      // Nonconstant element.
      return true;
    }

    VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
  }

  //printf("%llx %llx %llx %llx\n",
  //       VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
  return false;
}

// If this is a splat (repetition) of a value across the whole vector, return
// the smallest size that splats it.  For example, "0x01010101010101..." is a
// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
                            const uint64_t Undef128[2],
                            unsigned &SplatBits, unsigned &SplatUndef,
                            unsigned &SplatSize) {

  // Don't let undefs prevent splats from matching.  See if the top 64-bits
  // are the same as the lower 64-bits, ignoring undefs.
  if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
    return false;  // Can't be a splat if two pieces don't match.
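  // Note the cross-masking above: VectorBits is already zero wherever
  // UndefBits is set, so masking each 64-bit half by the complement of the
  // *other* half's undef bits compares only positions that are defined in
  // both halves.  The same trick repeats at each halving step below.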

  uint64_t Bits64  = Bits128[0] | Bits128[1];
  uint64_t Undef64 = Undef128[0] & Undef128[1];

  // Check that the top 32-bits are the same as the lower 32-bits, ignoring
  // undefs.
  if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
    return false;  // Can't be a splat if two pieces don't match.

  uint32_t Bits32  = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
  uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);

  // If the top 16-bits are different from the lower 16-bits, ignoring
  // undefs, we have an i32 splat.
  if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
    SplatBits = Bits32;
    SplatUndef = Undef32;
    SplatSize = 4;
    return true;
  }

  uint16_t Bits16  = uint16_t(Bits32)  | uint16_t(Bits32 >> 16);
  uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);

  // If the top 8-bits are different from the lower 8-bits, ignoring undefs,
  // we have an i16 splat.
  if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
    SplatBits = Bits16;
    SplatUndef = Undef16;
    SplatSize = 2;
    return true;
  }

  // Otherwise, we have an 8-bit splat.
  SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8);
  SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
  SplatSize = 1;
  return true;
}

/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize.  Cast the result to VT.
static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
                             SelectionDAG &DAG) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  // Force vspltis[hw] -1 to vspltisb -1.
  if (Val == -1) SplatSize = 1;

  static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };
  MVT::ValueType CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT));
  std::vector<SDOperand> Ops(MVT::getVectorNumElements(CanonicalVT), Elt);
  SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, Ops);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Res);
}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
                                  SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
                                  SDOperand Op2, SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
}


/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
                             MVT::ValueType VT, SelectionDAG &DAG) {
  // Force LHS/RHS to be the right type.
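  // (vsldoi treats LHS:RHS as a 32-byte concatenation and extracts the 16
  // consecutive bytes starting at byte Amt; that is modeled here as a v16i8
  // VECTOR_SHUFFLE whose mask is <Amt, Amt+1, ..., Amt+15>.)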
  LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);

  std::vector<SDOperand> Ops;
  for (unsigned i = 0; i != 16; ++i)
    Ops.push_back(DAG.getConstant(i+Amt, MVT::i32));
  SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
                            DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
  return DAG.getNode(ISD::BIT_CONVERT, VT, T);
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // If this is a vector of constants or undefs, get the bits.  A bit in
  // UndefBits is set if the corresponding element of the vector is an
  // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
  // zero.
  uint64_t VectorBits[2];
  uint64_t UndefBits[2];
  if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits))
    return SDOperand();  // Not a constant vector.

  // If this is a splat (repetition) of a value across the whole vector, return
  // the smallest size that splats it.  For example, "0x01010101010101..." is a
  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
  // SplatSize = 1 byte.
  unsigned SplatBits, SplatUndef, SplatSize;
  if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef,
                      SplatSize)) {
    bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;

    // First, handle single instruction cases.

    // All zeros?
    if (SplatBits == 0) {
      // Canonicalize all zero vectors to be v4i32.
      if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
        SDOperand Z = DAG.getConstant(0, MVT::i32);
        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
      }
      return Op;
    }

    // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
    int32_t SextVal =
      int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
    if (SextVal >= -16 && SextVal <= 15)
      return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);


    // Two instruction sequences.

    // If this value is in the range [-32,30] and is even, use:
    //   tmp = VSPLTI[bhw], result = add tmp, tmp
    if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
      Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
    }

    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
    // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
    // for fneg/fabs.
    if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
      // Make a -1 splat with vspltisw -1:
      SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);

      // Make the VSLW intrinsic, computing 0x8000_0000.
      SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                       OnesV, DAG);

      // xor by OnesV to invert it.
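      // Illustrative element values: OnesV is 0xFFFFFFFF in every lane, and
      // vslw shifts each lane left by the low 5 bits of the shift operand,
      // i.e. by 31, giving 0x80000000; the xor below then yields 0x7FFFFFFF.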
      Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
    }

    // Check to see if this is a wide variety of vsplti*, binop self cases.
    unsigned SplatBitSize = SplatSize*8;
    static const signed char SplatCsts[] = {
      -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
      -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
    };
    for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
      // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
      // cases which are ambiguous (e.g. formation of 0x8000_0000).
      int i = SplatCsts[idx];

      // Figure out what shift amount will be used by altivec if shifted by i
      // in this splat size.
      unsigned TypeShiftAmt = i & (SplatBitSize-1);

      // vsplti + shl self.
      if (SextVal == (i << (int)TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
          Intrinsic::ppc_altivec_vslw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + srl self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
          Intrinsic::ppc_altivec_vsrw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + sra self.  (Arithmetic shift, in contrast to the logical
      // shift in the srl case above; the two conditions must differ or this
      // case is unreachable.)
      if (SextVal == (i >> (int)TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
          Intrinsic::ppc_altivec_vsraw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + rol self.
      if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                           ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
          Intrinsic::ppc_altivec_vrlw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // t = vsplti c, result = vsldoi t, t, 1 (rotate the splat pattern left
      // by one byte).
      if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 2
      if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 3
      if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
      }
    }

    // Three instruction sequences.

    // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
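    // (The guard below accepts any SextVal in [0,31], but everything in that
    // range that is even or <= 15 was already matched by a cheaper sequence
    // above, so only the odd values 17..31 actually reach the subtract; e.g.
    // 27 is built as vsplti 11 minus vsplti -16.)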
    if (SextVal >= 0 && SextVal <= 31) {
      SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
    }
    // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
    if (SextVal >= -31 && SextVal <= 0) {
      SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
    }
  }

  return SDOperand();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS,
                                        SDOperand RHS, SelectionDAG &DAG) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDOperand OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);

  unsigned ShufIdxs[16];
  switch (OpNum) {
  default: assert(0 && "Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
  }
  std::vector<SDOperand> Ops;
  for (unsigned i = 0; i != 16; ++i)
    Ops.push_back(DAG.getConstant(ShufIdxs[i], MVT::i32));

  return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.
/// If this is a shuffle we can handle in a single instruction, return it.
/// Otherwise, return the code it can be lowered into.  Worst case, it can
/// always be lowered into a vperm.
static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.getOpcode() == ISD::UNDEF) {
    if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
        PPC::isSplatShuffleMask(PermMask.Val, 2) ||
        PPC::isSplatShuffleMask(PermMask.Val, 4) ||
        PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
        PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
        PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
      PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
      PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
    return Op;

  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
        continue;   // Undef, ignore it.

      unsigned ByteSource =
        cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle table to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  if (isFourElementShuffle) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost  = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be
    // computed.  For example, if the perm mask can be hoisted out of a loop
    // or is already in use (perhaps because there are multiple permutes with
    // the same shuffle mask), the vperm has a cost of 1.  OTOH, hoisting the
    // permute mask out of the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can
    // be generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
  MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
  unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;

  std::vector<SDOperand> ResultMask;
  for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
    unsigned SrcElt;
    if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
      SrcElt = 0;
    else
      SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();

    for (unsigned j = 0; j != BytesPerElement; ++j)
      ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
                                           MVT::i8));
  }

  SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask);
  return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
}

/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
/// altivec comparison.  If it is, return true and fill in CompareOpc/isDot
/// with information about the intrinsic.
static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
                                  bool &isDot) {
  unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default: return false;
  // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
    return SDOperand();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
                                Op.getOperand(1), Op.getOperand(2),
                                DAG.getConstant(CompareOpc, MVT::i32));
    return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  std::vector<SDOperand> Ops;
  std::vector<MVT::ValueType> VTs;
  Ops.push_back(Op.getOperand(2));  // LHS
  Ops.push_back(Op.getOperand(3));  // RHS
  Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
  VTs.push_back(Op.getOperand(2).getValueType());
  VTs.push_back(MVT::Flag);
  SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
                                DAG.getRegister(PPC::CR6, MVT::i32),
                                CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
                      DAG.getConstant(8-(3-BitNo), MVT::i32));
  // Isolate the bit.
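  // (For example, BitNo == 0 gives a shift amount of 8-(3-0) == 5 above, so
  // after the SRL and the AND below the selected CR6 bit ends up in bit 0 of
  // Flags, assuming the lowered MFCR leaves the CR6 field in the low byte of
  // the result.)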
  Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
                      DAG.getConstant(1, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
                        DAG.getConstant(1, MVT::i32));
  return Flags;
}

static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16);
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
                                Op.getOperand(0), FIdx, DAG.getSrcValue(NULL));
  // Load it out.
  return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL));
}

static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getValueType() == MVT::v4i32) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG);
    SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.

    SDOperand RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                        LHS, RHS, DAG, MVT::v4i32);

    SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                        LHS, RHSSwap, Zero, DAG, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
    return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    // Multiply the even 8-bit parts, producing 16-bit products.
    SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                           LHS, RHS, DAG, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit products.
    SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                          LHS, RHS, DAG, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);

    // Merge the results together.
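    // The shuffle mask built below selects bytes <1,17, 3,19, 5,21, ...>:
    // byte 2*i+1 is the low half of the i'th 16-bit product in EvenParts and
    // byte 2*i+1+16 the low half of the i'th product in OddParts (big-endian
    // byte order within each lane), interleaving the 16 truncated products
    // back into their original order.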
    std::vector<SDOperand> Ops;
    for (unsigned i = 0; i != 8; ++i) {
      Ops.push_back(DAG.getConstant(2*i+1, MVT::i8));
      Ops.push_back(DAG.getConstant(2*i+1+16, MVT::i8));
    }

    return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
                       DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
  } else {
    assert(0 && "Unknown mul to lower!");
    abort();
  }
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:     return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:    return LowerGlobalAddress(Op, DAG);
  case ISD::JumpTable:        return LowerJumpTable(Op, DAG);
  case ISD::SETCC:            return LowerSETCC(Op, DAG);
  case ISD::VASTART:          return LowerVASTART(Op, DAG, VarArgsFrameIndex);
  case ISD::FORMAL_ARGUMENTS:
    return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
  case ISD::CALL:             return LowerCALL(Op, DAG);
  case ISD::RET:              return LowerRET(Op, DAG);

  case ISD::SELECT_CC:        return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_SINT:       return LowerFP_TO_SINT(Op, DAG);
  case ISD::SINT_TO_FP:       return LowerSINT_TO_FP(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL:              return LowerSHL(Op, DAG, getPointerTy());
  case ISD::SRL:              return LowerSRL(Op, DAG, getPointerTy());
  case ISD::SRA:              return LowerSRA(Op, DAG, getPointerTy());

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:     return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:   return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::MUL:              return LowerMUL(Op, DAG);
  }
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  assert((MI->getOpcode() == PPC::SELECT_CC_I4 ||
          MI->getOpcode() == PPC::SELECT_CC_I8 ||
          MI->getOpcode() == PPC::SELECT_CC_F4 ||
          MI->getOpcode() == PPC::SELECT_CC_F8 ||
          MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  ilist<MachineBasicBlock>::iterator It = BB;
  ++It;

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
       e = BB->succ_end(); i != e; ++i)
    sinkMBB->addSuccessor(*i);
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while (!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  delete MI;   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
        if (N->getOperand(0).getValueType() == MVT::i64) {
          SDOperand Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.Val);
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
            DCI.AddToWorklist(Val.Val);
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
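    // That is, a float-to-int store need not round-trip through a GPR:
    // fctiwz leaves the 32-bit result in the low word of an FPR, and stfiwx
    // stores that word directly to memory.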
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32) {
      SDOperand Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.Val);
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.Val);

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.Val);
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).Val->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16)) {
      SDOperand BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);

      return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
                         N->getOperand(2), N->getOperand(3),
                         DAG.getValueType(N->getOperand(1).getValueType()));
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (N->getOperand(0).getOpcode() == ISD::LOAD &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
      SDOperand Load = N->getOperand(0);
      // Create the byte-swapping load.
      std::vector<MVT::ValueType> VTs;
      VTs.push_back(MVT::i32);
      VTs.push_back(MVT::Other);
      std::vector<SDOperand> Ops;
      Ops.push_back(Load.getOperand(0));   // Chain
      Ops.push_back(Load.getOperand(1));   // Ptr
      Ops.push_back(Load.getOperand(2));   // SrcValue
      Ops.push_back(DAG.getValueType(N->getValueType(0)));  // VT
      SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops);

      // If this is an i16 load, insert the truncate.
      SDOperand ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away: we give it a bogus result value but a
      // real chain result.  The result value is dead because the bswap is
      // dead.
      DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDOperand(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6
    // and a normal output).
    //
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
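      // (A matching VCMPo computes the same vector result as this VCMP and
      // additionally sets CR6, so if one exists we can reuse its first
      // result and let this node fold away.)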
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if ((*UI)->getOpcode() == PPCISD::VCMPo &&
            (*UI)->getOperand(1) == N->getOperand(1) &&
            (*UI)->getOperand(2) == N->getOperand(2) &&
            (*UI)->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value of the VCMPo is
      // dead (has no uses), don't transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple
      // things could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFCR instruction, we know this is safe.  Otherwise
      // we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFCR: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the
    // predicate compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we
      // know that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      std::vector<SDOperand> Ops;
      std::vector<MVT::ValueType> VTs;
      Ops.push_back(LHS.getOperand(2));  // LHS of compare
      Ops.push_back(LHS.getOperand(3));  // RHS of compare
      Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
      VTs.push_back(LHS.getOperand(2).getValueType());
      VTs.push_back(MVT::Flag);
      SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);

      // Unpack the result based on how the target uses it.
      unsigned CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BEQ : PPC::BNE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BNE : PPC::BEQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BLT : PPC::BGE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BGE : PPC::BLT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         DAG.getConstant(CompOpc, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  KnownZero = 0;
  KnownOne = 0;
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}


/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
  switch (ConstraintLetter) {
  default: break;
  case 'b':
  case 'r':
  case 'f':
  case 'v':
  case 'y':
    return C_RegisterClass;
  }
  return TargetLowering::getConstraintType(ConstraintLetter);
}


std::vector<unsigned> PPCTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {      // GCC RS6000 Constraint Letters
    default: break;               // Unknown constraint letter
    case 'b':
      return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 ,
                                   PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
                                   PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
                                   PPC::R12, PPC::R13, PPC::R14, PPC::R15,
                                   PPC::R16, PPC::R17, PPC::R18, PPC::R19,
                                   PPC::R20, PPC::R21, PPC::R22, PPC::R23,
                                   PPC::R24, PPC::R25, PPC::R26, PPC::R27,
                                   PPC::R28, PPC::R29, PPC::R30, PPC::R31,
                                   0);
    case 'r':
      return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 ,
                                   PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
                                   PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
                                   PPC::R12, PPC::R13, PPC::R14, PPC::R15,
                                   PPC::R16, PPC::R17, PPC::R18, PPC::R19,
                                   PPC::R20, PPC::R21, PPC::R22, PPC::R23,
                                   PPC::R24, PPC::R25, PPC::R26, PPC::R27,
                                   PPC::R28, PPC::R29, PPC::R30, PPC::R31,
                                   0);
    case 'f':
      return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 ,
                                   PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 ,
                                   PPC::F8 , PPC::F9 , PPC::F10, PPC::F11,
                                   PPC::F12, PPC::F13, PPC::F14, PPC::F15,
                                   PPC::F16, PPC::F17, PPC::F18, PPC::F19,
                                   PPC::F20, PPC::F21, PPC::F22, PPC::F23,
                                   PPC::F24, PPC::F25, PPC::F26, PPC::F27,
                                   PPC::F28, PPC::F29, PPC::F30, PPC::F31,
                                   0);
    case 'v':
      return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 ,
                                   PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
                                   PPC::V8 , PPC::V9 , PPC::V10, PPC::V11,
                                   PPC::V12, PPC::V13, PPC::V14, PPC::V15,
                                   PPC::V16, PPC::V17, PPC::V18, PPC::V19,
                                   PPC::V20, PPC::V21, PPC::V22, PPC::V23,
                                   PPC::V24, PPC::V25, PPC::V26, PPC::V27,
                                   PPC::V28, PPC::V29, PPC::V30, PPC::V31,
                                   0);
    case 'y':
      return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
                                   PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7,
                                   0);
    }
  }

  return std::vector<unsigned>();
}

// isOperandValidForConstraint
bool PPCTargetLowering::
isOperandValidForConstraint(SDOperand Op, char Letter) {
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    if (!isa<ConstantSDNode>(Op)) return false;   // Must be an immediate.
    unsigned Value = cast<ConstantSDNode>(Op)->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      return (short)Value == (int)Value;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      return (short)Value == 0;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      return (Value >> 16) == 0;
    case 'M':  // "M" is a constant that is greater than 31.
      return Value > 31;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      return (int)Value > 0 && isPowerOf2_32(Value);
    case 'O':  // "O" is the constant zero.
      return Value == 0;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      return (short)-Value == (int)-Value;
    }
    break;
  }
  }

  // Handle standard constraint letters.
  return TargetLowering::isOperandValidForConstraint(Op, Letter);
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
  // PPC allows a sign-extended 16-bit immediate field, i.e. [-32768, 32767].
  return (V >= -(1 << 15) && V <= (1 << 15)-1);
}