PPCISelLowering.cpp revision 619965d32e2cb961cc315aaece2e32f53f236784
1//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by Chris Lattner and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the PPCISelLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "PPCISelLowering.h" 15#include "PPCMachineFunctionInfo.h" 16#include "PPCPredicates.h" 17#include "PPCTargetMachine.h" 18#include "PPCPerfectShuffle.h" 19#include "llvm/ADT/VectorExtras.h" 20#include "llvm/Analysis/ScalarEvolutionExpressions.h" 21#include "llvm/CodeGen/MachineFrameInfo.h" 22#include "llvm/CodeGen/MachineFunction.h" 23#include "llvm/CodeGen/MachineInstrBuilder.h" 24#include "llvm/CodeGen/SelectionDAG.h" 25#include "llvm/CodeGen/SSARegMap.h" 26#include "llvm/Constants.h" 27#include "llvm/Function.h" 28#include "llvm/Intrinsics.h" 29#include "llvm/Support/MathExtras.h" 30#include "llvm/Target/TargetOptions.h" 31#include "llvm/Support/CommandLine.h" 32using namespace llvm; 33 34static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc"); 35 36PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) 37 : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) { 38 39 // Fold away setcc operations if possible. 40 setSetCCIsExpensive(); 41 setPow2DivIsCheap(); 42 43 // Use _setjmp/_longjmp instead of setjmp/longjmp. 44 setUseUnderscoreSetJmpLongJmp(true); 45 46 // Set up the register classes. 47 addRegisterClass(MVT::i32, PPC::GPRCRegisterClass); 48 addRegisterClass(MVT::f32, PPC::F4RCRegisterClass); 49 addRegisterClass(MVT::f64, PPC::F8RCRegisterClass); 50 51 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD 52 setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand); 53 setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand); 54 55 // PowerPC does not have truncstore for i1. 56 setStoreXAction(MVT::i1, Promote); 57 58 // PowerPC has pre-inc load and store's. 59 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); 60 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); 61 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); 62 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); 63 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); 64 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); 65 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); 66 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); 67 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); 68 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); 69 70 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 71 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 72 73 // PowerPC has no intrinsics for these particular operations 74 setOperationAction(ISD::MEMMOVE, MVT::Other, Expand); 75 setOperationAction(ISD::MEMSET, MVT::Other, Expand); 76 setOperationAction(ISD::MEMCPY, MVT::Other, Expand); 77 78 // PowerPC has no SREM/UREM instructions 79 setOperationAction(ISD::SREM, MVT::i32, Expand); 80 setOperationAction(ISD::UREM, MVT::i32, Expand); 81 setOperationAction(ISD::SREM, MVT::i64, Expand); 82 setOperationAction(ISD::UREM, MVT::i64, Expand); 83 84 // We don't support sin/cos/sqrt/fmod 85 setOperationAction(ISD::FSIN , MVT::f64, Expand); 86 setOperationAction(ISD::FCOS , MVT::f64, Expand); 87 setOperationAction(ISD::FREM , MVT::f64, Expand); 88 setOperationAction(ISD::FSIN , MVT::f32, Expand); 89 setOperationAction(ISD::FCOS , MVT::f32, Expand); 90 setOperationAction(ISD::FREM , MVT::f32, Expand); 91 92 // If we're enabling GP optimizations, use hardware square root 93 if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) { 94 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 95 setOperationAction(ISD::FSQRT, MVT::f32, Expand); 96 } 97 98 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 99 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 100 101 // PowerPC does not have BSWAP, CTPOP or CTTZ 102 setOperationAction(ISD::BSWAP, MVT::i32 , Expand); 103 setOperationAction(ISD::CTPOP, MVT::i32 , Expand); 104 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 105 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); 106 setOperationAction(ISD::CTPOP, MVT::i64 , Expand); 107 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 108 109 // PowerPC does not have ROTR 110 setOperationAction(ISD::ROTR, MVT::i32 , Expand); 111 112 // PowerPC does not have Select 113 setOperationAction(ISD::SELECT, MVT::i32, Expand); 114 setOperationAction(ISD::SELECT, MVT::i64, Expand); 115 setOperationAction(ISD::SELECT, MVT::f32, Expand); 116 setOperationAction(ISD::SELECT, MVT::f64, Expand); 117 118 // PowerPC wants to turn select_cc of FP into fsel when possible. 119 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 120 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 121 122 // PowerPC wants to optimize integer setcc a bit 123 setOperationAction(ISD::SETCC, MVT::i32, Custom); 124 125 // PowerPC does not have BRCOND which requires SetCC 126 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 127 128 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 129 130 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. 131 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 132 133 // PowerPC does not have [U|S]INT_TO_FP 134 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); 135 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); 136 137 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand); 138 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand); 139 setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand); 140 setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand); 141 142 // We cannot sextinreg(i1). Expand to shifts. 143 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 144 145 146 // Support label based line numbers. 147 setOperationAction(ISD::LOCATION, MVT::Other, Expand); 148 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 149 // FIXME - use subtarget debug flags 150 if (!TM.getSubtarget<PPCSubtarget>().isDarwin()) 151 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand); 152 153 // We want to legalize GlobalAddress and ConstantPool nodes into the 154 // appropriate instructions to materialize the address. 155 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 156 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 157 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 158 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 159 setOperationAction(ISD::ConstantPool, MVT::i64, Custom); 160 setOperationAction(ISD::JumpTable, MVT::i64, Custom); 161 162 // RET must be custom lowered, to meet ABI requirements 163 setOperationAction(ISD::RET , MVT::Other, Custom); 164 165 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 166 setOperationAction(ISD::VASTART , MVT::Other, Custom); 167 168 // Use the default implementation. 169 setOperationAction(ISD::VAARG , MVT::Other, Expand); 170 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 171 setOperationAction(ISD::VAEND , MVT::Other, Expand); 172 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 173 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand); 174 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 175 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); 176 177 // We want to custom lower some of our intrinsics. 178 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 179 180 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 181 // They also have instructions for converting between i64 and fp. 182 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 183 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 184 185 // FIXME: disable this lowered code. This generates 64-bit register values, 186 // and we don't model the fact that the top part is clobbered by calls. We 187 // need to flag these together so that the value isn't live across a call. 188 //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 189 190 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT 191 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote); 192 } else { 193 // PowerPC does not have FP_TO_UINT on 32-bit implementations. 194 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 195 } 196 197 if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) { 198 // 64 bit PowerPC implementations can support i64 types directly 199 addRegisterClass(MVT::i64, PPC::G8RCRegisterClass); 200 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or 201 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 202 } else { 203 // 32 bit PowerPC wants to expand i64 shifts itself. 204 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 205 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 206 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 207 } 208 209 if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) { 210 // First set operation action for all vector types to expand. Then we 211 // will selectively turn on ones that can be effectively codegen'd. 212 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 213 VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { 214 // add/sub are legal for all supported vector VT's. 215 setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal); 216 setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal); 217 218 // We promote all shuffles to v16i8. 219 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote); 220 AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8); 221 222 // We promote all non-typed operations to v4i32. 223 setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote); 224 AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32); 225 setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote); 226 AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32); 227 setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote); 228 AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32); 229 setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote); 230 AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32); 231 setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote); 232 AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32); 233 setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote); 234 AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32); 235 236 // No other operations are legal. 237 setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand); 238 setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); 239 setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); 240 setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); 241 setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); 242 setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand); 243 setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand); 244 setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand); 245 setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand); 246 247 setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand); 248 } 249 250 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle 251 // with merges, splats, etc. 252 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); 253 254 setOperationAction(ISD::AND , MVT::v4i32, Legal); 255 setOperationAction(ISD::OR , MVT::v4i32, Legal); 256 setOperationAction(ISD::XOR , MVT::v4i32, Legal); 257 setOperationAction(ISD::LOAD , MVT::v4i32, Legal); 258 setOperationAction(ISD::SELECT, MVT::v4i32, Expand); 259 setOperationAction(ISD::STORE , MVT::v4i32, Legal); 260 261 addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass); 262 addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass); 263 addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass); 264 addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass); 265 266 setOperationAction(ISD::MUL, MVT::v4f32, Legal); 267 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 268 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 269 setOperationAction(ISD::MUL, MVT::v16i8, Custom); 270 271 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); 272 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); 273 274 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); 275 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); 276 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); 277 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 278 } 279 280 setSetCCResultType(MVT::i32); 281 setShiftAmountType(MVT::i32); 282 setSetCCResultContents(ZeroOrOneSetCCResult); 283 284 if (TM.getSubtarget<PPCSubtarget>().isPPC64()) 285 setStackPointerRegisterToSaveRestore(PPC::X1); 286 else 287 setStackPointerRegisterToSaveRestore(PPC::R1); 288 289 // We have target-specific dag combine patterns for the following nodes: 290 setTargetDAGCombine(ISD::SINT_TO_FP); 291 setTargetDAGCombine(ISD::STORE); 292 setTargetDAGCombine(ISD::BR_CC); 293 setTargetDAGCombine(ISD::BSWAP); 294 295 computeRegisterProperties(); 296} 297 298const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { 299 switch (Opcode) { 300 default: return 0; 301 case PPCISD::FSEL: return "PPCISD::FSEL"; 302 case PPCISD::FCFID: return "PPCISD::FCFID"; 303 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; 304 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; 305 case PPCISD::STFIWX: return "PPCISD::STFIWX"; 306 case PPCISD::VMADDFP: return "PPCISD::VMADDFP"; 307 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP"; 308 case PPCISD::VPERM: return "PPCISD::VPERM"; 309 case PPCISD::Hi: return "PPCISD::Hi"; 310 case PPCISD::Lo: return "PPCISD::Lo"; 311 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 312 case PPCISD::SRL: return "PPCISD::SRL"; 313 case PPCISD::SRA: return "PPCISD::SRA"; 314 case PPCISD::SHL: return "PPCISD::SHL"; 315 case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32"; 316 case PPCISD::STD_32: return "PPCISD::STD_32"; 317 case PPCISD::CALL: return "PPCISD::CALL"; 318 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 319 case PPCISD::BCTRL: return "PPCISD::BCTRL"; 320 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 321 case PPCISD::MFCR: return "PPCISD::MFCR"; 322 case PPCISD::VCMP: return "PPCISD::VCMP"; 323 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 324 case PPCISD::LBRX: return "PPCISD::LBRX"; 325 case PPCISD::STBRX: return "PPCISD::STBRX"; 326 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 327 } 328} 329 330//===----------------------------------------------------------------------===// 331// Node matching predicates, for use by the tblgen matching code. 332//===----------------------------------------------------------------------===// 333 334/// isFloatingPointZero - Return true if this is 0.0 or -0.0. 335static bool isFloatingPointZero(SDOperand Op) { 336 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 337 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0); 338 else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) { 339 // Maybe this has already been legalized into the constant pool? 340 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 341 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 342 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0); 343 } 344 return false; 345} 346 347/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 348/// true if Op is undef or if it matches the specified value. 349static bool isConstantOrUndef(SDOperand Op, unsigned Val) { 350 return Op.getOpcode() == ISD::UNDEF || 351 cast<ConstantSDNode>(Op)->getValue() == Val; 352} 353 354/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 355/// VPKUHUM instruction. 356bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) { 357 if (!isUnary) { 358 for (unsigned i = 0; i != 16; ++i) 359 if (!isConstantOrUndef(N->getOperand(i), i*2+1)) 360 return false; 361 } else { 362 for (unsigned i = 0; i != 8; ++i) 363 if (!isConstantOrUndef(N->getOperand(i), i*2+1) || 364 !isConstantOrUndef(N->getOperand(i+8), i*2+1)) 365 return false; 366 } 367 return true; 368} 369 370/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 371/// VPKUWUM instruction. 372bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) { 373 if (!isUnary) { 374 for (unsigned i = 0; i != 16; i += 2) 375 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) || 376 !isConstantOrUndef(N->getOperand(i+1), i*2+3)) 377 return false; 378 } else { 379 for (unsigned i = 0; i != 8; i += 2) 380 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) || 381 !isConstantOrUndef(N->getOperand(i+1), i*2+3) || 382 !isConstantOrUndef(N->getOperand(i+8), i*2+2) || 383 !isConstantOrUndef(N->getOperand(i+9), i*2+3)) 384 return false; 385 } 386 return true; 387} 388 389/// isVMerge - Common function, used to match vmrg* shuffles. 390/// 391static bool isVMerge(SDNode *N, unsigned UnitSize, 392 unsigned LHSStart, unsigned RHSStart) { 393 assert(N->getOpcode() == ISD::BUILD_VECTOR && 394 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!"); 395 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 396 "Unsupported merge size!"); 397 398 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 399 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 400 if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j), 401 LHSStart+j+i*UnitSize) || 402 !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j), 403 RHSStart+j+i*UnitSize)) 404 return false; 405 } 406 return true; 407} 408 409/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 410/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 411bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) { 412 if (!isUnary) 413 return isVMerge(N, UnitSize, 8, 24); 414 return isVMerge(N, UnitSize, 8, 8); 415} 416 417/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 418/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). 419bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) { 420 if (!isUnary) 421 return isVMerge(N, UnitSize, 0, 16); 422 return isVMerge(N, UnitSize, 0, 0); 423} 424 425 426/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 427/// amount, otherwise return -1. 428int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) { 429 assert(N->getOpcode() == ISD::BUILD_VECTOR && 430 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!"); 431 // Find the first non-undef value in the shuffle mask. 432 unsigned i; 433 for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i) 434 /*search*/; 435 436 if (i == 16) return -1; // all undef. 437 438 // Otherwise, check to see if the rest of the elements are consequtively 439 // numbered from this value. 440 unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue(); 441 if (ShiftAmt < i) return -1; 442 ShiftAmt -= i; 443 444 if (!isUnary) { 445 // Check the rest of the elements to see if they are consequtive. 446 for (++i; i != 16; ++i) 447 if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i)) 448 return -1; 449 } else { 450 // Check the rest of the elements to see if they are consequtive. 451 for (++i; i != 16; ++i) 452 if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15)) 453 return -1; 454 } 455 456 return ShiftAmt; 457} 458 459/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 460/// specifies a splat of a single element that is suitable for input to 461/// VSPLTB/VSPLTH/VSPLTW. 462bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) { 463 assert(N->getOpcode() == ISD::BUILD_VECTOR && 464 N->getNumOperands() == 16 && 465 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 466 467 // This is a splat operation if each element of the permute is the same, and 468 // if the value doesn't reference the second vector. 469 unsigned ElementBase = 0; 470 SDOperand Elt = N->getOperand(0); 471 if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt)) 472 ElementBase = EltV->getValue(); 473 else 474 return false; // FIXME: Handle UNDEF elements too! 475 476 if (cast<ConstantSDNode>(Elt)->getValue() >= 16) 477 return false; 478 479 // Check that they are consequtive. 480 for (unsigned i = 1; i != EltSize; ++i) { 481 if (!isa<ConstantSDNode>(N->getOperand(i)) || 482 cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase) 483 return false; 484 } 485 486 assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!"); 487 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 488 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 489 assert(isa<ConstantSDNode>(N->getOperand(i)) && 490 "Invalid VECTOR_SHUFFLE mask!"); 491 for (unsigned j = 0; j != EltSize; ++j) 492 if (N->getOperand(i+j) != N->getOperand(j)) 493 return false; 494 } 495 496 return true; 497} 498 499/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 500/// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 501unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) { 502 assert(isSplatShuffleMask(N, EltSize)); 503 return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize; 504} 505 506/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 507/// by using a vspltis[bhw] instruction of the specified element size, return 508/// the constant being splatted. The ByteSize field indicates the number of 509/// bytes of each element [124] -> [bhw]. 510SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 511 SDOperand OpVal(0, 0); 512 513 // If ByteSize of the splat is bigger than the element size of the 514 // build_vector, then we have a case where we are checking for a splat where 515 // multiple elements of the buildvector are folded together into a single 516 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 517 unsigned EltSize = 16/N->getNumOperands(); 518 if (EltSize < ByteSize) { 519 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 520 SDOperand UniquedVals[4]; 521 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 522 523 // See if all of the elements in the buildvector agree across. 524 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 525 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 526 // If the element isn't a constant, bail fully out. 527 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand(); 528 529 530 if (UniquedVals[i&(Multiple-1)].Val == 0) 531 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 532 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 533 return SDOperand(); // no match. 534 } 535 536 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 537 // either constant or undef values that are identical for each chunk. See 538 // if these chunks can form into a larger vspltis*. 539 540 // Check to see if all of the leading entries are either 0 or -1. If 541 // neither, then this won't fit into the immediate field. 542 bool LeadingZero = true; 543 bool LeadingOnes = true; 544 for (unsigned i = 0; i != Multiple-1; ++i) { 545 if (UniquedVals[i].Val == 0) continue; // Must have been undefs. 546 547 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue(); 548 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue(); 549 } 550 // Finally, check the least significant entry. 551 if (LeadingZero) { 552 if (UniquedVals[Multiple-1].Val == 0) 553 return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef 554 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue(); 555 if (Val < 16) 556 return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4) 557 } 558 if (LeadingOnes) { 559 if (UniquedVals[Multiple-1].Val == 0) 560 return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef 561 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended(); 562 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 563 return DAG.getTargetConstant(Val, MVT::i32); 564 } 565 566 return SDOperand(); 567 } 568 569 // Check to see if this buildvec has a single non-undef value in its elements. 570 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 571 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 572 if (OpVal.Val == 0) 573 OpVal = N->getOperand(i); 574 else if (OpVal != N->getOperand(i)) 575 return SDOperand(); 576 } 577 578 if (OpVal.Val == 0) return SDOperand(); // All UNDEF: use implicit def. 579 580 unsigned ValSizeInBytes = 0; 581 uint64_t Value = 0; 582 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 583 Value = CN->getValue(); 584 ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8; 585 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 586 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 587 Value = FloatToBits(CN->getValue()); 588 ValSizeInBytes = 4; 589 } 590 591 // If the splat value is larger than the element value, then we can never do 592 // this splat. The only case that we could fit the replicated bits into our 593 // immediate field for would be zero, and we prefer to use vxor for it. 594 if (ValSizeInBytes < ByteSize) return SDOperand(); 595 596 // If the element value is larger than the splat value, cut it in half and 597 // check to see if the two halves are equal. Continue doing this until we 598 // get to ByteSize. This allows us to handle 0x01010101 as 0x01. 599 while (ValSizeInBytes > ByteSize) { 600 ValSizeInBytes >>= 1; 601 602 // If the top half equals the bottom half, we're still ok. 603 if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) != 604 (Value & ((1 << (8*ValSizeInBytes))-1))) 605 return SDOperand(); 606 } 607 608 // Properly sign extend the value. 609 int ShAmt = (4-ByteSize)*8; 610 int MaskVal = ((int)Value << ShAmt) >> ShAmt; 611 612 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 613 if (MaskVal == 0) return SDOperand(); 614 615 // Finally, if this value fits in a 5 bit sext field, return it 616 if (((MaskVal << (32-5)) >> (32-5)) == MaskVal) 617 return DAG.getTargetConstant(MaskVal, MVT::i32); 618 return SDOperand(); 619} 620 621//===----------------------------------------------------------------------===// 622// Addressing Mode Selection 623//===----------------------------------------------------------------------===// 624 625/// isIntS16Immediate - This method tests to see if the node is either a 32-bit 626/// or 64-bit immediate, and if the value can be accurately represented as a 627/// sign extension from a 16-bit value. If so, this returns true and the 628/// immediate. 629static bool isIntS16Immediate(SDNode *N, short &Imm) { 630 if (N->getOpcode() != ISD::Constant) 631 return false; 632 633 Imm = (short)cast<ConstantSDNode>(N)->getValue(); 634 if (N->getValueType(0) == MVT::i32) 635 return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue(); 636 else 637 return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue(); 638} 639static bool isIntS16Immediate(SDOperand Op, short &Imm) { 640 return isIntS16Immediate(Op.Val, Imm); 641} 642 643 644/// SelectAddressRegReg - Given the specified addressed, check to see if it 645/// can be represented as an indexed [r+r] operation. Returns false if it 646/// can be more efficiently represented with [r+imm]. 647bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base, 648 SDOperand &Index, 649 SelectionDAG &DAG) { 650 short imm = 0; 651 if (N.getOpcode() == ISD::ADD) { 652 if (isIntS16Immediate(N.getOperand(1), imm)) 653 return false; // r+i 654 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 655 return false; // r+i 656 657 Base = N.getOperand(0); 658 Index = N.getOperand(1); 659 return true; 660 } else if (N.getOpcode() == ISD::OR) { 661 if (isIntS16Immediate(N.getOperand(1), imm)) 662 return false; // r+i can fold it if we can. 663 664 // If this is an or of disjoint bitfields, we can codegen this as an add 665 // (for better address arithmetic) if the LHS and RHS of the OR are provably 666 // disjoint. 667 uint64_t LHSKnownZero, LHSKnownOne; 668 uint64_t RHSKnownZero, RHSKnownOne; 669 ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); 670 671 if (LHSKnownZero) { 672 ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne); 673 // If all of the bits are known zero on the LHS or RHS, the add won't 674 // carry. 675 if ((LHSKnownZero | RHSKnownZero) == ~0U) { 676 Base = N.getOperand(0); 677 Index = N.getOperand(1); 678 return true; 679 } 680 } 681 } 682 683 return false; 684} 685 686/// Returns true if the address N can be represented by a base register plus 687/// a signed 16-bit displacement [r+imm], and if it is not better 688/// represented as reg+reg. 689bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp, 690 SDOperand &Base, SelectionDAG &DAG){ 691 // If this can be more profitably realized as r+r, fail. 692 if (SelectAddressRegReg(N, Disp, Base, DAG)) 693 return false; 694 695 if (N.getOpcode() == ISD::ADD) { 696 short imm = 0; 697 if (isIntS16Immediate(N.getOperand(1), imm)) { 698 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 699 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 700 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 701 } else { 702 Base = N.getOperand(0); 703 } 704 return true; // [r+i] 705 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 706 // Match LOAD (ADD (X, Lo(G))). 707 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue() 708 && "Cannot handle constant offsets yet!"); 709 Disp = N.getOperand(1).getOperand(0); // The global address. 710 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 711 Disp.getOpcode() == ISD::TargetConstantPool || 712 Disp.getOpcode() == ISD::TargetJumpTable); 713 Base = N.getOperand(0); 714 return true; // [&g+r] 715 } 716 } else if (N.getOpcode() == ISD::OR) { 717 short imm = 0; 718 if (isIntS16Immediate(N.getOperand(1), imm)) { 719 // If this is an or of disjoint bitfields, we can codegen this as an add 720 // (for better address arithmetic) if the LHS and RHS of the OR are 721 // provably disjoint. 722 uint64_t LHSKnownZero, LHSKnownOne; 723 ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); 724 if ((LHSKnownZero|~(unsigned)imm) == ~0U) { 725 // If all of the bits are known zero on the LHS or RHS, the add won't 726 // carry. 727 Base = N.getOperand(0); 728 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 729 return true; 730 } 731 } 732 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 733 // Loading from a constant address. 734 735 // If this address fits entirely in a 16-bit sext immediate field, codegen 736 // this as "d, 0" 737 short Imm; 738 if (isIntS16Immediate(CN, Imm)) { 739 Disp = DAG.getTargetConstant(Imm, CN->getValueType(0)); 740 Base = DAG.getRegister(PPC::R0, CN->getValueType(0)); 741 return true; 742 } 743 744 // FIXME: Handle small sext constant offsets in PPC64 mode also! 745 if (CN->getValueType(0) == MVT::i32) { 746 int Addr = (int)CN->getValue(); 747 748 // Otherwise, break this down into an LIS + disp. 749 Disp = DAG.getTargetConstant((short)Addr, MVT::i32); 750 Base = DAG.getConstant(Addr - (signed short)Addr, MVT::i32); 751 return true; 752 } 753 } 754 755 Disp = DAG.getTargetConstant(0, getPointerTy()); 756 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 757 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 758 else 759 Base = N; 760 return true; // [r+0] 761} 762 763/// SelectAddressRegRegOnly - Given the specified addressed, force it to be 764/// represented as an indexed [r+r] operation. 765bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base, 766 SDOperand &Index, 767 SelectionDAG &DAG) { 768 // Check to see if we can easily represent this as an [r+r] address. This 769 // will fail if it thinks that the address is more profitably represented as 770 // reg+imm, e.g. where imm = 0. 771 if (SelectAddressRegReg(N, Base, Index, DAG)) 772 return true; 773 774 // If the operand is an addition, always emit this as [r+r], since this is 775 // better (for code size, and execution, as the memop does the add for free) 776 // than emitting an explicit add. 777 if (N.getOpcode() == ISD::ADD) { 778 Base = N.getOperand(0); 779 Index = N.getOperand(1); 780 return true; 781 } 782 783 // Otherwise, do it the hard way, using R0 as the base register. 784 Base = DAG.getRegister(PPC::R0, N.getValueType()); 785 Index = N; 786 return true; 787} 788 789/// SelectAddressRegImmShift - Returns true if the address N can be 790/// represented by a base register plus a signed 14-bit displacement 791/// [r+imm*4]. Suitable for use by STD and friends. 792bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp, 793 SDOperand &Base, 794 SelectionDAG &DAG) { 795 // If this can be more profitably realized as r+r, fail. 796 if (SelectAddressRegReg(N, Disp, Base, DAG)) 797 return false; 798 799 if (N.getOpcode() == ISD::ADD) { 800 short imm = 0; 801 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 802 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 803 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 804 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 805 } else { 806 Base = N.getOperand(0); 807 } 808 return true; // [r+i] 809 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 810 // Match LOAD (ADD (X, Lo(G))). 811 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue() 812 && "Cannot handle constant offsets yet!"); 813 Disp = N.getOperand(1).getOperand(0); // The global address. 814 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 815 Disp.getOpcode() == ISD::TargetConstantPool || 816 Disp.getOpcode() == ISD::TargetJumpTable); 817 Base = N.getOperand(0); 818 return true; // [&g+r] 819 } 820 } else if (N.getOpcode() == ISD::OR) { 821 short imm = 0; 822 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 823 // If this is an or of disjoint bitfields, we can codegen this as an add 824 // (for better address arithmetic) if the LHS and RHS of the OR are 825 // provably disjoint. 826 uint64_t LHSKnownZero, LHSKnownOne; 827 ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); 828 if ((LHSKnownZero|~(unsigned)imm) == ~0U) { 829 // If all of the bits are known zero on the LHS or RHS, the add won't 830 // carry. 831 Base = N.getOperand(0); 832 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 833 return true; 834 } 835 } 836 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 837 // Loading from a constant address. 838 839 // If this address fits entirely in a 14-bit sext immediate field, codegen 840 // this as "d, 0" 841 short Imm; 842 if (isIntS16Immediate(CN, Imm)) { 843 Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy()); 844 Base = DAG.getRegister(PPC::R0, CN->getValueType(0)); 845 return true; 846 } 847 848 // FIXME: Handle small sext constant offsets in PPC64 mode also! 849 if (CN->getValueType(0) == MVT::i32) { 850 int Addr = (int)CN->getValue(); 851 852 // Otherwise, break this down into an LIS + disp. 853 Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32); 854 Base = DAG.getConstant(Addr - (signed short)Addr, MVT::i32); 855 return true; 856 } 857 } 858 859 Disp = DAG.getTargetConstant(0, getPointerTy()); 860 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 861 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 862 else 863 Base = N; 864 return true; // [r+0] 865} 866 867 868/// getPreIndexedAddressParts - returns true by value, base pointer and 869/// offset pointer and addressing mode by reference if the node's address 870/// can be legally represented as pre-indexed load / store address. 871bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, 872 SDOperand &Offset, 873 ISD::MemIndexedMode &AM, 874 SelectionDAG &DAG) { 875 // Disabled by default for now. 876 if (!EnablePPCPreinc) return false; 877 878 SDOperand Ptr; 879 MVT::ValueType VT; 880 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 881 Ptr = LD->getBasePtr(); 882 VT = LD->getLoadedVT(); 883 884 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 885 ST = ST; 886 Ptr = ST->getBasePtr(); 887 VT = ST->getStoredVT(); 888 } else 889 return false; 890 891 // PowerPC doesn't have preinc load/store instructions for vectors. 892 if (MVT::isVector(VT)) 893 return false; 894 895 // TODO: Check reg+reg first. 896 897 // LDU/STU use reg+imm*4, others use reg+imm. 898 if (VT != MVT::i64) { 899 // reg + imm 900 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG)) 901 return false; 902 } else { 903 // reg + imm * 4. 904 if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG)) 905 return false; 906 } 907 908 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 909 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 910 // sext i32 to i64 when addr mode is r+i. 911 if (LD->getValueType(0) == MVT::i64 && LD->getLoadedVT() == MVT::i32 && 912 LD->getExtensionType() == ISD::SEXTLOAD && 913 isa<ConstantSDNode>(Offset)) 914 return false; 915 } 916 917 AM = ISD::PRE_INC; 918 return true; 919} 920 921//===----------------------------------------------------------------------===// 922// LowerOperation implementation 923//===----------------------------------------------------------------------===// 924 925static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 926 MVT::ValueType PtrVT = Op.getValueType(); 927 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 928 Constant *C = CP->getConstVal(); 929 SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); 930 SDOperand Zero = DAG.getConstant(0, PtrVT); 931 932 const TargetMachine &TM = DAG.getTarget(); 933 934 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero); 935 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero); 936 937 // If this is a non-darwin platform, we don't support non-static relo models 938 // yet. 939 if (TM.getRelocationModel() == Reloc::Static || 940 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 941 // Generate non-pic code that has direct accesses to the constant pool. 942 // The address of the global is just (hi(&g)+lo(&g)). 943 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 944 } 945 946 if (TM.getRelocationModel() == Reloc::PIC_) { 947 // With PIC, the first instruction is actually "GR+hi(&G)". 948 Hi = DAG.getNode(ISD::ADD, PtrVT, 949 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 950 } 951 952 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 953 return Lo; 954} 955 956static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 957 MVT::ValueType PtrVT = Op.getValueType(); 958 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 959 SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 960 SDOperand Zero = DAG.getConstant(0, PtrVT); 961 962 const TargetMachine &TM = DAG.getTarget(); 963 964 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero); 965 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero); 966 967 // If this is a non-darwin platform, we don't support non-static relo models 968 // yet. 969 if (TM.getRelocationModel() == Reloc::Static || 970 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 971 // Generate non-pic code that has direct accesses to the constant pool. 972 // The address of the global is just (hi(&g)+lo(&g)). 973 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 974 } 975 976 if (TM.getRelocationModel() == Reloc::PIC_) { 977 // With PIC, the first instruction is actually "GR+hi(&G)". 978 Hi = DAG.getNode(ISD::ADD, PtrVT, 979 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 980 } 981 982 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 983 return Lo; 984} 985 986static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 987 MVT::ValueType PtrVT = Op.getValueType(); 988 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 989 GlobalValue *GV = GSDN->getGlobal(); 990 SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); 991 SDOperand Zero = DAG.getConstant(0, PtrVT); 992 993 const TargetMachine &TM = DAG.getTarget(); 994 995 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero); 996 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero); 997 998 // If this is a non-darwin platform, we don't support non-static relo models 999 // yet. 1000 if (TM.getRelocationModel() == Reloc::Static || 1001 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 1002 // Generate non-pic code that has direct accesses to globals. 1003 // The address of the global is just (hi(&g)+lo(&g)). 1004 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1005 } 1006 1007 if (TM.getRelocationModel() == Reloc::PIC_) { 1008 // With PIC, the first instruction is actually "GR+hi(&G)". 1009 Hi = DAG.getNode(ISD::ADD, PtrVT, 1010 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 1011 } 1012 1013 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1014 1015 if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() && 1016 (!GV->isExternal() || GV->hasNotBeenReadFromBytecode())) 1017 return Lo; 1018 1019 // If the global is weak or external, we have to go through the lazy 1020 // resolution stub. 1021 return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0); 1022} 1023 1024static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) { 1025 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1026 1027 // If we're comparing for equality to zero, expose the fact that this is 1028 // implented as a ctlz/srl pair on ppc, so that the dag combiner can 1029 // fold the new nodes. 1030 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1031 if (C->isNullValue() && CC == ISD::SETEQ) { 1032 MVT::ValueType VT = Op.getOperand(0).getValueType(); 1033 SDOperand Zext = Op.getOperand(0); 1034 if (VT < MVT::i32) { 1035 VT = MVT::i32; 1036 Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0)); 1037 } 1038 unsigned Log2b = Log2_32(MVT::getSizeInBits(VT)); 1039 SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext); 1040 SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz, 1041 DAG.getConstant(Log2b, MVT::i32)); 1042 return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc); 1043 } 1044 // Leave comparisons against 0 and -1 alone for now, since they're usually 1045 // optimized. FIXME: revisit this when we can custom lower all setcc 1046 // optimizations. 1047 if (C->isAllOnesValue() || C->isNullValue()) 1048 return SDOperand(); 1049 } 1050 1051 // If we have an integer seteq/setne, turn it into a compare against zero 1052 // by xor'ing the rhs with the lhs, which is faster than setting a 1053 // condition register, reading it back out, and masking the correct bit. The 1054 // normal approach here uses sub to do this instead of xor. Using xor exposes 1055 // the result to other bit-twiddling opportunities. 1056 MVT::ValueType LHSVT = Op.getOperand(0).getValueType(); 1057 if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1058 MVT::ValueType VT = Op.getValueType(); 1059 SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0), 1060 Op.getOperand(1)); 1061 return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC); 1062 } 1063 return SDOperand(); 1064} 1065 1066static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG, 1067 unsigned VarArgsFrameIndex) { 1068 // vastart just stores the address of the VarArgsFrameIndex slot into the 1069 // memory location argument. 1070 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1071 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1072 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2)); 1073 return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(), 1074 SV->getOffset()); 1075} 1076 1077static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, 1078 int &VarArgsFrameIndex) { 1079 // TODO: add description of PPC stack frame format, or at least some docs. 1080 // 1081 MachineFunction &MF = DAG.getMachineFunction(); 1082 MachineFrameInfo *MFI = MF.getFrameInfo(); 1083 SSARegMap *RegMap = MF.getSSARegMap(); 1084 SmallVector<SDOperand, 8> ArgValues; 1085 SDOperand Root = Op.getOperand(0); 1086 1087 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1088 bool isPPC64 = PtrVT == MVT::i64; 1089 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1090 1091 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64); 1092 1093 static const unsigned GPR_32[] = { // 32-bit registers. 1094 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1095 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1096 }; 1097 static const unsigned GPR_64[] = { // 64-bit registers. 1098 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1099 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1100 }; 1101 static const unsigned FPR[] = { 1102 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1103 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1104 }; 1105 static const unsigned VR[] = { 1106 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1107 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1108 }; 1109 1110 const unsigned Num_GPR_Regs = sizeof(GPR_32)/sizeof(GPR_32[0]); 1111 const unsigned Num_FPR_Regs = sizeof(FPR)/sizeof(FPR[0]); 1112 const unsigned Num_VR_Regs = sizeof( VR)/sizeof( VR[0]); 1113 1114 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1115 1116 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1117 1118 // Add DAG nodes to load the arguments or copy them out of registers. On 1119 // entry to a function on PPC, the arguments start after the linkage area, 1120 // although the first ones are often in registers. 1121 for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { 1122 SDOperand ArgVal; 1123 bool needsLoad = false; 1124 MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); 1125 unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; 1126 unsigned ArgSize = ObjSize; 1127 1128 unsigned CurArgOffset = ArgOffset; 1129 switch (ObjectVT) { 1130 default: assert(0 && "Unhandled argument type!"); 1131 case MVT::i32: 1132 // All int arguments reserve stack space. 1133 ArgOffset += PtrByteSize; 1134 1135 if (GPR_idx != Num_GPR_Regs) { 1136 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); 1137 MF.addLiveIn(GPR[GPR_idx], VReg); 1138 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32); 1139 ++GPR_idx; 1140 } else { 1141 needsLoad = true; 1142 ArgSize = PtrByteSize; 1143 } 1144 break; 1145 case MVT::i64: // PPC64 1146 // All int arguments reserve stack space. 1147 ArgOffset += 8; 1148 1149 if (GPR_idx != Num_GPR_Regs) { 1150 unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass); 1151 MF.addLiveIn(GPR[GPR_idx], VReg); 1152 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1153 ++GPR_idx; 1154 } else { 1155 needsLoad = true; 1156 } 1157 break; 1158 case MVT::f32: 1159 case MVT::f64: 1160 // All FP arguments reserve stack space. 1161 ArgOffset += isPPC64 ? 8 : ObjSize; 1162 1163 // Every 4 bytes of argument space consumes one of the GPRs available for 1164 // argument passing. 1165 if (GPR_idx != Num_GPR_Regs) { 1166 ++GPR_idx; 1167 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 1168 ++GPR_idx; 1169 } 1170 if (FPR_idx != Num_FPR_Regs) { 1171 unsigned VReg; 1172 if (ObjectVT == MVT::f32) 1173 VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass); 1174 else 1175 VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass); 1176 MF.addLiveIn(FPR[FPR_idx], VReg); 1177 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1178 ++FPR_idx; 1179 } else { 1180 needsLoad = true; 1181 } 1182 break; 1183 case MVT::v4f32: 1184 case MVT::v4i32: 1185 case MVT::v8i16: 1186 case MVT::v16i8: 1187 // Note that vector arguments in registers don't reserve stack space. 1188 if (VR_idx != Num_VR_Regs) { 1189 unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass); 1190 MF.addLiveIn(VR[VR_idx], VReg); 1191 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1192 ++VR_idx; 1193 } else { 1194 // This should be simple, but requires getting 16-byte aligned stack 1195 // values. 1196 assert(0 && "Loading VR argument not implemented yet!"); 1197 needsLoad = true; 1198 } 1199 break; 1200 } 1201 1202 // We need to load the argument to a virtual register if we determined above 1203 // that we ran out of physical registers of the appropriate type 1204 if (needsLoad) { 1205 // If the argument is actually used, emit a load from the right stack 1206 // slot. 1207 if (!Op.Val->hasNUsesOfValue(0, ArgNo)) { 1208 int FI = MFI->CreateFixedObject(ObjSize, 1209 CurArgOffset + (ArgSize - ObjSize)); 1210 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); 1211 ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); 1212 } else { 1213 // Don't emit a dead load. 1214 ArgVal = DAG.getNode(ISD::UNDEF, ObjectVT); 1215 } 1216 } 1217 1218 ArgValues.push_back(ArgVal); 1219 } 1220 1221 // If the function takes variable number of arguments, make a frame index for 1222 // the start of the first vararg value... for expansion of llvm.va_start. 1223 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1224 if (isVarArg) { 1225 VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, 1226 ArgOffset); 1227 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1228 // If this function is vararg, store any remaining integer argument regs 1229 // to their spots on the stack so that they may be loaded by deferencing the 1230 // result of va_next. 1231 SmallVector<SDOperand, 8> MemOps; 1232 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 1233 unsigned VReg; 1234 if (isPPC64) 1235 VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass); 1236 else 1237 VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); 1238 1239 MF.addLiveIn(GPR[GPR_idx], VReg); 1240 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1241 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1242 MemOps.push_back(Store); 1243 // Increment the address by four for the next argument to store 1244 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); 1245 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1246 } 1247 if (!MemOps.empty()) 1248 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size()); 1249 } 1250 1251 ArgValues.push_back(Root); 1252 1253 // Return the new list of results. 1254 std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), 1255 Op.Val->value_end()); 1256 return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); 1257} 1258 1259/// isCallCompatibleAddress - Return the immediate to use if the specified 1260/// 32-bit value is representable in the immediate field of a BxA instruction. 1261static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) { 1262 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 1263 if (!C) return 0; 1264 1265 int Addr = C->getValue(); 1266 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 1267 (Addr << 6 >> 6) != Addr) 1268 return 0; // Top 6 bits have to be sext of immediate. 1269 1270 return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val; 1271} 1272 1273static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { 1274 SDOperand Chain = Op.getOperand(0); 1275 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1276 SDOperand Callee = Op.getOperand(4); 1277 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1278 1279 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1280 bool isPPC64 = PtrVT == MVT::i64; 1281 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1282 1283 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in 1284 // SelectExpr to use to put the arguments in the appropriate registers. 1285 std::vector<SDOperand> args_to_use; 1286 1287 // Count how many bytes are to be pushed on the stack, including the linkage 1288 // area, and parameter passing area. We start with 24/48 bytes, which is 1289 // prereserved space for [SP][CR][LR][3 x unused]. 1290 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64); 1291 1292 // Add up all the space actually used. 1293 for (unsigned i = 0; i != NumOps; ++i) { 1294 unsigned ArgSize =MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8; 1295 ArgSize = std::max(ArgSize, PtrByteSize); 1296 NumBytes += ArgSize; 1297 } 1298 1299 // The prolog code of the callee may store up to 8 GPR argument registers to 1300 // the stack, allowing va_start to index over them in memory if its varargs. 1301 // Because we cannot tell if this is needed on the caller side, we have to 1302 // conservatively assume that it is needed. As such, make sure we have at 1303 // least enough stack space for the caller to store the 8 GPRs. 1304 NumBytes = std::max(NumBytes, PPCFrameInfo::getMinCallFrameSize(isPPC64)); 1305 1306 // Adjust the stack pointer for the new arguments... 1307 // These operations are automatically eliminated by the prolog/epilog pass 1308 Chain = DAG.getCALLSEQ_START(Chain, 1309 DAG.getConstant(NumBytes, PtrVT)); 1310 1311 // Set up a copy of the stack pointer for use loading and storing any 1312 // arguments that may not fit in the registers available for argument 1313 // passing. 1314 SDOperand StackPtr; 1315 if (isPPC64) 1316 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 1317 else 1318 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 1319 1320 // Figure out which arguments are going to go in registers, and which in 1321 // memory. Also, if this is a vararg function, floating point operations 1322 // must be stored to our stack, and loaded into integer regs as well, if 1323 // any integer regs are available for argument passing. 1324 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64); 1325 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1326 1327 static const unsigned GPR_32[] = { // 32-bit registers. 1328 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1329 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1330 }; 1331 static const unsigned GPR_64[] = { // 64-bit registers. 1332 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1333 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1334 }; 1335 static const unsigned FPR[] = { 1336 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1337 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1338 }; 1339 static const unsigned VR[] = { 1340 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1341 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1342 }; 1343 const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]); 1344 const unsigned NumFPRs = sizeof(FPR)/sizeof(FPR[0]); 1345 const unsigned NumVRs = sizeof( VR)/sizeof( VR[0]); 1346 1347 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1348 1349 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 1350 SmallVector<SDOperand, 8> MemOpChains; 1351 for (unsigned i = 0; i != NumOps; ++i) { 1352 SDOperand Arg = Op.getOperand(5+2*i); 1353 1354 // PtrOff will be used to store the current argument to the stack if a 1355 // register cannot be found for it. 1356 SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 1357 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff); 1358 1359 // On PPC64, promote integers to 64-bit values. 1360 if (isPPC64 && Arg.getValueType() == MVT::i32) { 1361 unsigned ExtOp = ISD::ZERO_EXTEND; 1362 if (cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue()) 1363 ExtOp = ISD::SIGN_EXTEND; 1364 Arg = DAG.getNode(ExtOp, MVT::i64, Arg); 1365 } 1366 1367 switch (Arg.getValueType()) { 1368 default: assert(0 && "Unexpected ValueType for argument!"); 1369 case MVT::i32: 1370 case MVT::i64: 1371 if (GPR_idx != NumGPRs) { 1372 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 1373 } else { 1374 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1375 } 1376 ArgOffset += PtrByteSize; 1377 break; 1378 case MVT::f32: 1379 case MVT::f64: 1380 if (FPR_idx != NumFPRs) { 1381 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 1382 1383 if (isVarArg) { 1384 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 1385 MemOpChains.push_back(Store); 1386 1387 // Float varargs are always shadowed in available integer registers 1388 if (GPR_idx != NumGPRs) { 1389 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1390 MemOpChains.push_back(Load.getValue(1)); 1391 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 1392 } 1393 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64) { 1394 SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 1395 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour); 1396 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1397 MemOpChains.push_back(Load.getValue(1)); 1398 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 1399 } 1400 } else { 1401 // If we have any FPRs remaining, we may also have GPRs remaining. 1402 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 1403 // GPRs. 1404 if (GPR_idx != NumGPRs) 1405 ++GPR_idx; 1406 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64) 1407 ++GPR_idx; 1408 } 1409 } else { 1410 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1411 } 1412 if (isPPC64) 1413 ArgOffset += 8; 1414 else 1415 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 1416 break; 1417 case MVT::v4f32: 1418 case MVT::v4i32: 1419 case MVT::v8i16: 1420 case MVT::v16i8: 1421 assert(!isVarArg && "Don't support passing vectors to varargs yet!"); 1422 assert(VR_idx != NumVRs && 1423 "Don't support passing more than 12 vector args yet!"); 1424 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 1425 break; 1426 } 1427 } 1428 if (!MemOpChains.empty()) 1429 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1430 &MemOpChains[0], MemOpChains.size()); 1431 1432 // Build a sequence of copy-to-reg nodes chained together with token chain 1433 // and flag operands which copy the outgoing args into the appropriate regs. 1434 SDOperand InFlag; 1435 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1436 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1437 InFlag); 1438 InFlag = Chain.getValue(1); 1439 } 1440 1441 std::vector<MVT::ValueType> NodeTys; 1442 NodeTys.push_back(MVT::Other); // Returns a chain 1443 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 1444 1445 SmallVector<SDOperand, 8> Ops; 1446 unsigned CallOpc = PPCISD::CALL; 1447 1448 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1449 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1450 // node so that legalize doesn't hack it. 1451 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1452 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType()); 1453 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1454 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); 1455 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) 1456 // If this is an absolute destination address, use the munged value. 1457 Callee = SDOperand(Dest, 0); 1458 else { 1459 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 1460 // to do the call, we can't use PPCISD::CALL. 1461 SDOperand MTCTROps[] = {Chain, Callee, InFlag}; 1462 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0)); 1463 InFlag = Chain.getValue(1); 1464 1465 // Copy the callee address into R12 on darwin. 1466 Chain = DAG.getCopyToReg(Chain, PPC::R12, Callee, InFlag); 1467 InFlag = Chain.getValue(1); 1468 1469 NodeTys.clear(); 1470 NodeTys.push_back(MVT::Other); 1471 NodeTys.push_back(MVT::Flag); 1472 Ops.push_back(Chain); 1473 CallOpc = PPCISD::BCTRL; 1474 Callee.Val = 0; 1475 } 1476 1477 // If this is a direct call, pass the chain and the callee. 1478 if (Callee.Val) { 1479 Ops.push_back(Chain); 1480 Ops.push_back(Callee); 1481 } 1482 1483 // Add argument registers to the end of the list so that they are known live 1484 // into the call. 1485 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1486 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1487 RegsToPass[i].second.getValueType())); 1488 1489 if (InFlag.Val) 1490 Ops.push_back(InFlag); 1491 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size()); 1492 InFlag = Chain.getValue(1); 1493 1494 SDOperand ResultVals[3]; 1495 unsigned NumResults = 0; 1496 NodeTys.clear(); 1497 1498 // If the call has results, copy the values out of the ret val registers. 1499 switch (Op.Val->getValueType(0)) { 1500 default: assert(0 && "Unexpected ret value!"); 1501 case MVT::Other: break; 1502 case MVT::i32: 1503 if (Op.Val->getValueType(1) == MVT::i32) { 1504 Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1); 1505 ResultVals[0] = Chain.getValue(0); 1506 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, 1507 Chain.getValue(2)).getValue(1); 1508 ResultVals[1] = Chain.getValue(0); 1509 NumResults = 2; 1510 NodeTys.push_back(MVT::i32); 1511 } else { 1512 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1); 1513 ResultVals[0] = Chain.getValue(0); 1514 NumResults = 1; 1515 } 1516 NodeTys.push_back(MVT::i32); 1517 break; 1518 case MVT::i64: 1519 Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1); 1520 ResultVals[0] = Chain.getValue(0); 1521 NumResults = 1; 1522 NodeTys.push_back(MVT::i64); 1523 break; 1524 case MVT::f32: 1525 case MVT::f64: 1526 Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0), 1527 InFlag).getValue(1); 1528 ResultVals[0] = Chain.getValue(0); 1529 NumResults = 1; 1530 NodeTys.push_back(Op.Val->getValueType(0)); 1531 break; 1532 case MVT::v4f32: 1533 case MVT::v4i32: 1534 case MVT::v8i16: 1535 case MVT::v16i8: 1536 Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0), 1537 InFlag).getValue(1); 1538 ResultVals[0] = Chain.getValue(0); 1539 NumResults = 1; 1540 NodeTys.push_back(Op.Val->getValueType(0)); 1541 break; 1542 } 1543 1544 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain, 1545 DAG.getConstant(NumBytes, PtrVT)); 1546 NodeTys.push_back(MVT::Other); 1547 1548 // If the function returns void, just return the chain. 1549 if (NumResults == 0) 1550 return Chain; 1551 1552 // Otherwise, merge everything together with a MERGE_VALUES node. 1553 ResultVals[NumResults++] = Chain; 1554 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, 1555 ResultVals, NumResults); 1556 return Res.getValue(Op.ResNo); 1557} 1558 1559static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { 1560 SDOperand Copy; 1561 switch(Op.getNumOperands()) { 1562 default: 1563 assert(0 && "Do not know how to return this many arguments!"); 1564 abort(); 1565 case 1: 1566 return SDOperand(); // ret void is legal 1567 case 3: { 1568 MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); 1569 unsigned ArgReg; 1570 if (ArgVT == MVT::i32) { 1571 ArgReg = PPC::R3; 1572 } else if (ArgVT == MVT::i64) { 1573 ArgReg = PPC::X3; 1574 } else if (MVT::isVector(ArgVT)) { 1575 ArgReg = PPC::V2; 1576 } else { 1577 assert(MVT::isFloatingPoint(ArgVT)); 1578 ArgReg = PPC::F1; 1579 } 1580 1581 Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1), 1582 SDOperand()); 1583 1584 // If we haven't noted the R3/F1 are live out, do so now. 1585 if (DAG.getMachineFunction().liveout_empty()) 1586 DAG.getMachineFunction().addLiveOut(ArgReg); 1587 break; 1588 } 1589 case 5: 1590 Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(3), 1591 SDOperand()); 1592 Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1)); 1593 // If we haven't noted the R3+R4 are live out, do so now. 1594 if (DAG.getMachineFunction().liveout_empty()) { 1595 DAG.getMachineFunction().addLiveOut(PPC::R3); 1596 DAG.getMachineFunction().addLiveOut(PPC::R4); 1597 } 1598 break; 1599 } 1600 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1)); 1601} 1602 1603static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG, 1604 const PPCSubtarget &Subtarget) { 1605 MachineFunction &MF = DAG.getMachineFunction(); 1606 bool IsPPC64 = Subtarget.isPPC64(); 1607 1608 // Get current frame pointer save index. The users of this index will be 1609 // primarily DYNALLOC instructions. 1610 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1611 int FPSI = FI->getFramePointerSaveIndex(); 1612 1613 // If the frame pointer save index hasn't been defined yet. 1614 if (!FPSI) { 1615 // Find out what the fix offset of the frame pointer save area. 1616 int Offset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64); 1617 // Allocate the frame index for frame pointer save area. 1618 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, Offset); 1619 // Save the result. 1620 FI->setFramePointerSaveIndex(FPSI); 1621 } 1622 1623 // Get the inputs. 1624 SDOperand Chain = Op.getOperand(0); 1625 SDOperand Size = Op.getOperand(1); 1626 1627 // Get the corect type for pointers. 1628 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1629 // Negate the size. 1630 SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT, 1631 DAG.getConstant(0, PtrVT), Size); 1632 // Construct a node for the frame pointer save index. 1633 SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT); 1634 // Build a DYNALLOC node. 1635 SDOperand Ops[3] = { Chain, NegSize, FPSIdx }; 1636 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 1637 return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3); 1638} 1639 1640 1641/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 1642/// possible. 1643static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { 1644 // Not FP? Not a fsel. 1645 if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) || 1646 !MVT::isFloatingPoint(Op.getOperand(2).getValueType())) 1647 return SDOperand(); 1648 1649 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 1650 1651 // Cannot handle SETEQ/SETNE. 1652 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand(); 1653 1654 MVT::ValueType ResVT = Op.getValueType(); 1655 MVT::ValueType CmpVT = Op.getOperand(0).getValueType(); 1656 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 1657 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3); 1658 1659 // If the RHS of the comparison is a 0.0, we don't need to do the 1660 // subtraction at all. 1661 if (isFloatingPointZero(RHS)) 1662 switch (CC) { 1663 default: break; // SETUO etc aren't handled by fsel. 1664 case ISD::SETULT: 1665 case ISD::SETOLT: 1666 case ISD::SETLT: 1667 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 1668 case ISD::SETUGE: 1669 case ISD::SETOGE: 1670 case ISD::SETGE: 1671 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 1672 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 1673 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV); 1674 case ISD::SETUGT: 1675 case ISD::SETOGT: 1676 case ISD::SETGT: 1677 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 1678 case ISD::SETULE: 1679 case ISD::SETOLE: 1680 case ISD::SETLE: 1681 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 1682 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 1683 return DAG.getNode(PPCISD::FSEL, ResVT, 1684 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV); 1685 } 1686 1687 SDOperand Cmp; 1688 switch (CC) { 1689 default: break; // SETUO etc aren't handled by fsel. 1690 case ISD::SETULT: 1691 case ISD::SETOLT: 1692 case ISD::SETLT: 1693 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 1694 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 1695 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 1696 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 1697 case ISD::SETUGE: 1698 case ISD::SETOGE: 1699 case ISD::SETGE: 1700 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 1701 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 1702 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 1703 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 1704 case ISD::SETUGT: 1705 case ISD::SETOGT: 1706 case ISD::SETGT: 1707 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 1708 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 1709 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 1710 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 1711 case ISD::SETULE: 1712 case ISD::SETOLE: 1713 case ISD::SETLE: 1714 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 1715 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 1716 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 1717 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 1718 } 1719 return SDOperand(); 1720} 1721 1722static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 1723 assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType())); 1724 SDOperand Src = Op.getOperand(0); 1725 if (Src.getValueType() == MVT::f32) 1726 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); 1727 1728 SDOperand Tmp; 1729 switch (Op.getValueType()) { 1730 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); 1731 case MVT::i32: 1732 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src); 1733 break; 1734 case MVT::i64: 1735 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src); 1736 break; 1737 } 1738 1739 // Convert the FP value to an int value through memory. 1740 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp); 1741 if (Op.getValueType() == MVT::i32) 1742 Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits); 1743 return Bits; 1744} 1745 1746static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 1747 if (Op.getOperand(0).getValueType() == MVT::i64) { 1748 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); 1749 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); 1750 if (Op.getValueType() == MVT::f32) 1751 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); 1752 return FP; 1753 } 1754 1755 assert(Op.getOperand(0).getValueType() == MVT::i32 && 1756 "Unhandled SINT_TO_FP type in custom expander!"); 1757 // Since we only generate this in 64-bit mode, we can take advantage of 1758 // 64-bit registers. In particular, sign extend the input value into the 1759 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 1760 // then lfd it and fcfid it. 1761 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 1762 int FrameIdx = FrameInfo->CreateStackObject(8, 8); 1763 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1764 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 1765 1766 SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, 1767 Op.getOperand(0)); 1768 1769 // STD the extended value into the stack slot. 1770 SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other, 1771 DAG.getEntryNode(), Ext64, FIdx, 1772 DAG.getSrcValue(NULL)); 1773 // Load the value as a double. 1774 SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0); 1775 1776 // FCFID it and return it. 1777 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); 1778 if (Op.getValueType() == MVT::f32) 1779 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); 1780 return FP; 1781} 1782 1783static SDOperand LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { 1784 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 1785 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!"); 1786 1787 // Expand into a bunch of logical ops. Note that these ops 1788 // depend on the PPC behavior for oversized shift amounts. 1789 SDOperand Lo = Op.getOperand(0); 1790 SDOperand Hi = Op.getOperand(1); 1791 SDOperand Amt = Op.getOperand(2); 1792 1793 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 1794 DAG.getConstant(32, MVT::i32), Amt); 1795 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt); 1796 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1); 1797 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 1798 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 1799 DAG.getConstant(-32U, MVT::i32)); 1800 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5); 1801 SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); 1802 SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt); 1803 SDOperand OutOps[] = { OutLo, OutHi }; 1804 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 1805 OutOps, 2); 1806} 1807 1808static SDOperand LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { 1809 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 1810 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!"); 1811 1812 // Otherwise, expand into a bunch of logical ops. Note that these ops 1813 // depend on the PPC behavior for oversized shift amounts. 1814 SDOperand Lo = Op.getOperand(0); 1815 SDOperand Hi = Op.getOperand(1); 1816 SDOperand Amt = Op.getOperand(2); 1817 1818 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 1819 DAG.getConstant(32, MVT::i32), Amt); 1820 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); 1821 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); 1822 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 1823 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 1824 DAG.getConstant(-32U, MVT::i32)); 1825 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5); 1826 SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); 1827 SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt); 1828 SDOperand OutOps[] = { OutLo, OutHi }; 1829 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 1830 OutOps, 2); 1831} 1832 1833static SDOperand LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { 1834 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 1835 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!"); 1836 1837 // Otherwise, expand into a bunch of logical ops, followed by a select_cc. 1838 SDOperand Lo = Op.getOperand(0); 1839 SDOperand Hi = Op.getOperand(1); 1840 SDOperand Amt = Op.getOperand(2); 1841 1842 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 1843 DAG.getConstant(32, MVT::i32), Amt); 1844 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); 1845 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); 1846 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 1847 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 1848 DAG.getConstant(-32U, MVT::i32)); 1849 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5); 1850 SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt); 1851 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32), 1852 Tmp4, Tmp6, ISD::SETLE); 1853 SDOperand OutOps[] = { OutLo, OutHi }; 1854 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 1855 OutOps, 2); 1856} 1857 1858//===----------------------------------------------------------------------===// 1859// Vector related lowering. 1860// 1861 1862// If this is a vector of constants or undefs, get the bits. A bit in 1863// UndefBits is set if the corresponding element of the vector is an 1864// ISD::UNDEF value. For undefs, the corresponding VectorBits values are 1865// zero. Return true if this is not an array of constants, false if it is. 1866// 1867static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], 1868 uint64_t UndefBits[2]) { 1869 // Start with zero'd results. 1870 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; 1871 1872 unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); 1873 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 1874 SDOperand OpVal = BV->getOperand(i); 1875 1876 unsigned PartNo = i >= e/2; // In the upper 128 bits? 1877 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t. 1878 1879 uint64_t EltBits = 0; 1880 if (OpVal.getOpcode() == ISD::UNDEF) { 1881 uint64_t EltUndefBits = ~0U >> (32-EltBitSize); 1882 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); 1883 continue; 1884 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1885 EltBits = CN->getValue() & (~0U >> (32-EltBitSize)); 1886 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1887 assert(CN->getValueType(0) == MVT::f32 && 1888 "Only one legal FP vector type!"); 1889 EltBits = FloatToBits(CN->getValue()); 1890 } else { 1891 // Nonconstant element. 1892 return true; 1893 } 1894 1895 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); 1896 } 1897 1898 //printf("%llx %llx %llx %llx\n", 1899 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); 1900 return false; 1901} 1902 1903// If this is a splat (repetition) of a value across the whole vector, return 1904// the smallest size that splats it. For example, "0x01010101010101..." is a 1905// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 1906// SplatSize = 1 byte. 1907static bool isConstantSplat(const uint64_t Bits128[2], 1908 const uint64_t Undef128[2], 1909 unsigned &SplatBits, unsigned &SplatUndef, 1910 unsigned &SplatSize) { 1911 1912 // Don't let undefs prevent splats from matching. See if the top 64-bits are 1913 // the same as the lower 64-bits, ignoring undefs. 1914 if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0])) 1915 return false; // Can't be a splat if two pieces don't match. 1916 1917 uint64_t Bits64 = Bits128[0] | Bits128[1]; 1918 uint64_t Undef64 = Undef128[0] & Undef128[1]; 1919 1920 // Check that the top 32-bits are the same as the lower 32-bits, ignoring 1921 // undefs. 1922 if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64)) 1923 return false; // Can't be a splat if two pieces don't match. 1924 1925 uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32); 1926 uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32); 1927 1928 // If the top 16-bits are different than the lower 16-bits, ignoring 1929 // undefs, we have an i32 splat. 1930 if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) { 1931 SplatBits = Bits32; 1932 SplatUndef = Undef32; 1933 SplatSize = 4; 1934 return true; 1935 } 1936 1937 uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16); 1938 uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16); 1939 1940 // If the top 8-bits are different than the lower 8-bits, ignoring 1941 // undefs, we have an i16 splat. 1942 if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) { 1943 SplatBits = Bits16; 1944 SplatUndef = Undef16; 1945 SplatSize = 2; 1946 return true; 1947 } 1948 1949 // Otherwise, we have an 8-bit splat. 1950 SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8); 1951 SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8); 1952 SplatSize = 1; 1953 return true; 1954} 1955 1956/// BuildSplatI - Build a canonical splati of Val with an element size of 1957/// SplatSize. Cast the result to VT. 1958static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, 1959 SelectionDAG &DAG) { 1960 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 1961 1962 // Force vspltis[hw] -1 to vspltisb -1. 1963 if (Val == -1) SplatSize = 1; 1964 1965 static const MVT::ValueType VTys[] = { // canonical VT to use for each size. 1966 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 1967 }; 1968 MVT::ValueType CanonicalVT = VTys[SplatSize-1]; 1969 1970 // Build a canonical splat for this value. 1971 SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT)); 1972 SmallVector<SDOperand, 8> Ops; 1973 Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt); 1974 SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, 1975 &Ops[0], Ops.size()); 1976 return DAG.getNode(ISD::BIT_CONVERT, VT, Res); 1977} 1978 1979/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 1980/// specified intrinsic ID. 1981static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, 1982 SelectionDAG &DAG, 1983 MVT::ValueType DestVT = MVT::Other) { 1984 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 1985 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 1986 DAG.getConstant(IID, MVT::i32), LHS, RHS); 1987} 1988 1989/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 1990/// specified intrinsic ID. 1991static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, 1992 SDOperand Op2, SelectionDAG &DAG, 1993 MVT::ValueType DestVT = MVT::Other) { 1994 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 1995 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 1996 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 1997} 1998 1999 2000/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 2001/// amount. The result has the specified value type. 2002static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, 2003 MVT::ValueType VT, SelectionDAG &DAG) { 2004 // Force LHS/RHS to be the right type. 2005 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); 2006 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); 2007 2008 SDOperand Ops[16]; 2009 for (unsigned i = 0; i != 16; ++i) 2010 Ops[i] = DAG.getConstant(i+Amt, MVT::i32); 2011 SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, 2012 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16)); 2013 return DAG.getNode(ISD::BIT_CONVERT, VT, T); 2014} 2015 2016// If this is a case we can't handle, return null and let the default 2017// expansion code take care of it. If we CAN select this case, and if it 2018// selects to a single instruction, return Op. Otherwise, if we can codegen 2019// this case more efficiently than a constant pool load, lower it to the 2020// sequence of ops that should be used. 2021static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2022 // If this is a vector of constants or undefs, get the bits. A bit in 2023 // UndefBits is set if the corresponding element of the vector is an 2024 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are 2025 // zero. 2026 uint64_t VectorBits[2]; 2027 uint64_t UndefBits[2]; 2028 if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)) 2029 return SDOperand(); // Not a constant vector. 2030 2031 // If this is a splat (repetition) of a value across the whole vector, return 2032 // the smallest size that splats it. For example, "0x01010101010101..." is a 2033 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 2034 // SplatSize = 1 byte. 2035 unsigned SplatBits, SplatUndef, SplatSize; 2036 if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){ 2037 bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0; 2038 2039 // First, handle single instruction cases. 2040 2041 // All zeros? 2042 if (SplatBits == 0) { 2043 // Canonicalize all zero vectors to be v4i32. 2044 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 2045 SDOperand Z = DAG.getConstant(0, MVT::i32); 2046 Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z); 2047 Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z); 2048 } 2049 return Op; 2050 } 2051 2052 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 2053 int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize); 2054 if (SextVal >= -16 && SextVal <= 15) 2055 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG); 2056 2057 2058 // Two instruction sequences. 2059 2060 // If this value is in the range [-32,30] and is even, use: 2061 // tmp = VSPLTI[bhw], result = add tmp, tmp 2062 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { 2063 Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG); 2064 return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op); 2065 } 2066 2067 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 2068 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 2069 // for fneg/fabs. 2070 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 2071 // Make -1 and vspltisw -1: 2072 SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG); 2073 2074 // Make the VSLW intrinsic, computing 0x8000_0000. 2075 SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 2076 OnesV, DAG); 2077 2078 // xor by OnesV to invert it. 2079 Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV); 2080 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2081 } 2082 2083 // Check to see if this is a wide variety of vsplti*, binop self cases. 2084 unsigned SplatBitSize = SplatSize*8; 2085 static const char SplatCsts[] = { 2086 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 2087 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 2088 }; 2089 for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){ 2090 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 2091 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 2092 int i = SplatCsts[idx]; 2093 2094 // Figure out what shift amount will be used by altivec if shifted by i in 2095 // this splat size. 2096 unsigned TypeShiftAmt = i & (SplatBitSize-1); 2097 2098 // vsplti + shl self. 2099 if (SextVal == (i << (int)TypeShiftAmt)) { 2100 Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG); 2101 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2102 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 2103 Intrinsic::ppc_altivec_vslw 2104 }; 2105 return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG); 2106 } 2107 2108 // vsplti + srl self. 2109 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 2110 Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG); 2111 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2112 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 2113 Intrinsic::ppc_altivec_vsrw 2114 }; 2115 return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG); 2116 } 2117 2118 // vsplti + sra self. 2119 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 2120 Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG); 2121 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2122 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 2123 Intrinsic::ppc_altivec_vsraw 2124 }; 2125 return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG); 2126 } 2127 2128 // vsplti + rol self. 2129 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 2130 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 2131 Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG); 2132 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2133 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 2134 Intrinsic::ppc_altivec_vrlw 2135 }; 2136 return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG); 2137 } 2138 2139 // t = vsplti c, result = vsldoi t, t, 1 2140 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) { 2141 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2142 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG); 2143 } 2144 // t = vsplti c, result = vsldoi t, t, 2 2145 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) { 2146 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2147 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG); 2148 } 2149 // t = vsplti c, result = vsldoi t, t, 3 2150 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) { 2151 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2152 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG); 2153 } 2154 } 2155 2156 // Three instruction sequences. 2157 2158 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 2159 if (SextVal >= 0 && SextVal <= 31) { 2160 SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, Op.getValueType(),DAG); 2161 SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG); 2162 return DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS); 2163 } 2164 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 2165 if (SextVal >= -31 && SextVal <= 0) { 2166 SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, Op.getValueType(),DAG); 2167 SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG); 2168 return DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS); 2169 } 2170 } 2171 2172 return SDOperand(); 2173} 2174 2175/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 2176/// the specified operations to build the shuffle. 2177static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, 2178 SDOperand RHS, SelectionDAG &DAG) { 2179 unsigned OpNum = (PFEntry >> 26) & 0x0F; 2180 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 2181 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 2182 2183 enum { 2184 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 2185 OP_VMRGHW, 2186 OP_VMRGLW, 2187 OP_VSPLTISW0, 2188 OP_VSPLTISW1, 2189 OP_VSPLTISW2, 2190 OP_VSPLTISW3, 2191 OP_VSLDOI4, 2192 OP_VSLDOI8, 2193 OP_VSLDOI12 2194 }; 2195 2196 if (OpNum == OP_COPY) { 2197 if (LHSID == (1*9+2)*9+3) return LHS; 2198 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 2199 return RHS; 2200 } 2201 2202 SDOperand OpLHS, OpRHS; 2203 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG); 2204 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG); 2205 2206 unsigned ShufIdxs[16]; 2207 switch (OpNum) { 2208 default: assert(0 && "Unknown i32 permute!"); 2209 case OP_VMRGHW: 2210 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 2211 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 2212 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 2213 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 2214 break; 2215 case OP_VMRGLW: 2216 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 2217 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 2218 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 2219 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 2220 break; 2221 case OP_VSPLTISW0: 2222 for (unsigned i = 0; i != 16; ++i) 2223 ShufIdxs[i] = (i&3)+0; 2224 break; 2225 case OP_VSPLTISW1: 2226 for (unsigned i = 0; i != 16; ++i) 2227 ShufIdxs[i] = (i&3)+4; 2228 break; 2229 case OP_VSPLTISW2: 2230 for (unsigned i = 0; i != 16; ++i) 2231 ShufIdxs[i] = (i&3)+8; 2232 break; 2233 case OP_VSPLTISW3: 2234 for (unsigned i = 0; i != 16; ++i) 2235 ShufIdxs[i] = (i&3)+12; 2236 break; 2237 case OP_VSLDOI4: 2238 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG); 2239 case OP_VSLDOI8: 2240 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG); 2241 case OP_VSLDOI12: 2242 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG); 2243 } 2244 SDOperand Ops[16]; 2245 for (unsigned i = 0; i != 16; ++i) 2246 Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i32); 2247 2248 return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS, 2249 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 2250} 2251 2252/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 2253/// is a shuffle we can handle in a single instruction, return it. Otherwise, 2254/// return the code it can be lowered into. Worst case, it can always be 2255/// lowered into a vperm. 2256static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 2257 SDOperand V1 = Op.getOperand(0); 2258 SDOperand V2 = Op.getOperand(1); 2259 SDOperand PermMask = Op.getOperand(2); 2260 2261 // Cases that are handled by instructions that take permute immediates 2262 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 2263 // selected by the instruction selector. 2264 if (V2.getOpcode() == ISD::UNDEF) { 2265 if (PPC::isSplatShuffleMask(PermMask.Val, 1) || 2266 PPC::isSplatShuffleMask(PermMask.Val, 2) || 2267 PPC::isSplatShuffleMask(PermMask.Val, 4) || 2268 PPC::isVPKUWUMShuffleMask(PermMask.Val, true) || 2269 PPC::isVPKUHUMShuffleMask(PermMask.Val, true) || 2270 PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 || 2271 PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) || 2272 PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) || 2273 PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) || 2274 PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) || 2275 PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) || 2276 PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) { 2277 return Op; 2278 } 2279 } 2280 2281 // Altivec has a variety of "shuffle immediates" that take two vector inputs 2282 // and produce a fixed permutation. If any of these match, do not lower to 2283 // VPERM. 2284 if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) || 2285 PPC::isVPKUHUMShuffleMask(PermMask.Val, false) || 2286 PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 || 2287 PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) || 2288 PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) || 2289 PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) || 2290 PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) || 2291 PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) || 2292 PPC::isVMRGHShuffleMask(PermMask.Val, 4, false)) 2293 return Op; 2294 2295 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 2296 // perfect shuffle table to emit an optimal matching sequence. 2297 unsigned PFIndexes[4]; 2298 bool isFourElementShuffle = true; 2299 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 2300 unsigned EltNo = 8; // Start out undef. 2301 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 2302 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF) 2303 continue; // Undef, ignore it. 2304 2305 unsigned ByteSource = 2306 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue(); 2307 if ((ByteSource & 3) != j) { 2308 isFourElementShuffle = false; 2309 break; 2310 } 2311 2312 if (EltNo == 8) { 2313 EltNo = ByteSource/4; 2314 } else if (EltNo != ByteSource/4) { 2315 isFourElementShuffle = false; 2316 break; 2317 } 2318 } 2319 PFIndexes[i] = EltNo; 2320 } 2321 2322 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 2323 // perfect shuffle vector to determine if it is cost effective to do this as 2324 // discrete instructions, or whether we should use a vperm. 2325 if (isFourElementShuffle) { 2326 // Compute the index in the perfect shuffle table. 2327 unsigned PFTableIndex = 2328 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 2329 2330 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 2331 unsigned Cost = (PFEntry >> 30); 2332 2333 // Determining when to avoid vperm is tricky. Many things affect the cost 2334 // of vperm, particularly how many times the perm mask needs to be computed. 2335 // For example, if the perm mask can be hoisted out of a loop or is already 2336 // used (perhaps because there are multiple permutes with the same shuffle 2337 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 2338 // the loop requires an extra register. 2339 // 2340 // As a compromise, we only emit discrete instructions if the shuffle can be 2341 // generated in 3 or fewer operations. When we have loop information 2342 // available, if this block is within a loop, we should avoid using vperm 2343 // for 3-operation perms and use a constant pool load instead. 2344 if (Cost < 3) 2345 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG); 2346 } 2347 2348 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 2349 // vector that will get spilled to the constant pool. 2350 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 2351 2352 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 2353 // that it is in input element units, not in bytes. Convert now. 2354 MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType()); 2355 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; 2356 2357 SmallVector<SDOperand, 16> ResultMask; 2358 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { 2359 unsigned SrcElt; 2360 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) 2361 SrcElt = 0; 2362 else 2363 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue(); 2364 2365 for (unsigned j = 0; j != BytesPerElement; ++j) 2366 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 2367 MVT::i8)); 2368 } 2369 2370 SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, 2371 &ResultMask[0], ResultMask.size()); 2372 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); 2373} 2374 2375/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 2376/// altivec comparison. If it is, return true and fill in Opc/isDot with 2377/// information about the intrinsic. 2378static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc, 2379 bool &isDot) { 2380 unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue(); 2381 CompareOpc = -1; 2382 isDot = false; 2383 switch (IntrinsicID) { 2384 default: return false; 2385 // Comparison predicates. 2386 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 2387 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 2388 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 2389 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 2390 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 2391 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 2392 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 2393 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 2394 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 2395 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 2396 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 2397 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 2398 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 2399 2400 // Normal Comparisons. 2401 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 2402 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 2403 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 2404 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 2405 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 2406 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 2407 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 2408 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 2409 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 2410 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 2411 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 2412 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 2413 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 2414 } 2415 return true; 2416} 2417 2418/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 2419/// lower, do it, otherwise return null. 2420static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 2421 // If this is a lowered altivec predicate compare, CompareOpc is set to the 2422 // opcode number of the comparison. 2423 int CompareOpc; 2424 bool isDot; 2425 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 2426 return SDOperand(); // Don't custom lower most intrinsics. 2427 2428 // If this is a non-dot comparison, make the VCMP node and we are done. 2429 if (!isDot) { 2430 SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), 2431 Op.getOperand(1), Op.getOperand(2), 2432 DAG.getConstant(CompareOpc, MVT::i32)); 2433 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp); 2434 } 2435 2436 // Create the PPCISD altivec 'dot' comparison node. 2437 SDOperand Ops[] = { 2438 Op.getOperand(2), // LHS 2439 Op.getOperand(3), // RHS 2440 DAG.getConstant(CompareOpc, MVT::i32) 2441 }; 2442 std::vector<MVT::ValueType> VTs; 2443 VTs.push_back(Op.getOperand(2).getValueType()); 2444 VTs.push_back(MVT::Flag); 2445 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); 2446 2447 // Now that we have the comparison, emit a copy from the CR to a GPR. 2448 // This is flagged to the above dot comparison. 2449 SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, 2450 DAG.getRegister(PPC::CR6, MVT::i32), 2451 CompNode.getValue(1)); 2452 2453 // Unpack the result based on how the target uses it. 2454 unsigned BitNo; // Bit # of CR6. 2455 bool InvertBit; // Invert result? 2456 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) { 2457 default: // Can't happen, don't crash on invalid number though. 2458 case 0: // Return the value of the EQ bit of CR6. 2459 BitNo = 0; InvertBit = false; 2460 break; 2461 case 1: // Return the inverted value of the EQ bit of CR6. 2462 BitNo = 0; InvertBit = true; 2463 break; 2464 case 2: // Return the value of the LT bit of CR6. 2465 BitNo = 2; InvertBit = false; 2466 break; 2467 case 3: // Return the inverted value of the LT bit of CR6. 2468 BitNo = 2; InvertBit = true; 2469 break; 2470 } 2471 2472 // Shift the bit into the low position. 2473 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags, 2474 DAG.getConstant(8-(3-BitNo), MVT::i32)); 2475 // Isolate the bit. 2476 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags, 2477 DAG.getConstant(1, MVT::i32)); 2478 2479 // If we are supposed to, toggle the bit. 2480 if (InvertBit) 2481 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags, 2482 DAG.getConstant(1, MVT::i32)); 2483 return Flags; 2484} 2485 2486static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2487 // Create a stack slot that is 16-byte aligned. 2488 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 2489 int FrameIdx = FrameInfo->CreateStackObject(16, 16); 2490 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2491 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 2492 2493 // Store the input value into Value#0 of the stack slot. 2494 SDOperand Store = DAG.getStore(DAG.getEntryNode(), 2495 Op.getOperand(0), FIdx, NULL, 0); 2496 // Load it out. 2497 return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0); 2498} 2499 2500static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) { 2501 if (Op.getValueType() == MVT::v4i32) { 2502 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2503 2504 SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG); 2505 SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt. 2506 2507 SDOperand RHSSwap = // = vrlw RHS, 16 2508 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG); 2509 2510 // Shrinkify inputs to v8i16. 2511 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS); 2512 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS); 2513 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap); 2514 2515 // Low parts multiplied together, generating 32-bit results (we ignore the 2516 // top parts). 2517 SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 2518 LHS, RHS, DAG, MVT::v4i32); 2519 2520 SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 2521 LHS, RHSSwap, Zero, DAG, MVT::v4i32); 2522 // Shift the high parts up 16 bits. 2523 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG); 2524 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd); 2525 } else if (Op.getValueType() == MVT::v8i16) { 2526 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2527 2528 SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG); 2529 2530 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 2531 LHS, RHS, Zero, DAG); 2532 } else if (Op.getValueType() == MVT::v16i8) { 2533 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2534 2535 // Multiply the even 8-bit parts, producing 16-bit sums. 2536 SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 2537 LHS, RHS, DAG, MVT::v8i16); 2538 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts); 2539 2540 // Multiply the odd 8-bit parts, producing 16-bit sums. 2541 SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 2542 LHS, RHS, DAG, MVT::v8i16); 2543 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts); 2544 2545 // Merge the results together. 2546 SDOperand Ops[16]; 2547 for (unsigned i = 0; i != 8; ++i) { 2548 Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8); 2549 Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8); 2550 } 2551 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts, 2552 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 2553 } else { 2554 assert(0 && "Unknown mul to lower!"); 2555 abort(); 2556 } 2557} 2558 2559/// LowerOperation - Provide custom lowering hooks for some operations. 2560/// 2561SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 2562 switch (Op.getOpcode()) { 2563 default: assert(0 && "Wasn't expecting to be able to lower this!"); 2564 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 2565 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 2566 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 2567 case ISD::SETCC: return LowerSETCC(Op, DAG); 2568 case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex); 2569 case ISD::FORMAL_ARGUMENTS: 2570 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex); 2571 case ISD::CALL: return LowerCALL(Op, DAG); 2572 case ISD::RET: return LowerRET(Op, DAG); 2573 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG, 2574 PPCSubTarget); 2575 2576 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 2577 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 2578 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 2579 2580 // Lower 64-bit shifts. 2581 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 2582 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 2583 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 2584 2585 // Vector-related lowering. 2586 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 2587 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 2588 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 2589 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 2590 case ISD::MUL: return LowerMUL(Op, DAG); 2591 } 2592 return SDOperand(); 2593} 2594 2595//===----------------------------------------------------------------------===// 2596// Other Lowering Code 2597//===----------------------------------------------------------------------===// 2598 2599MachineBasicBlock * 2600PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 2601 MachineBasicBlock *BB) { 2602 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 2603 assert((MI->getOpcode() == PPC::SELECT_CC_I4 || 2604 MI->getOpcode() == PPC::SELECT_CC_I8 || 2605 MI->getOpcode() == PPC::SELECT_CC_F4 || 2606 MI->getOpcode() == PPC::SELECT_CC_F8 || 2607 MI->getOpcode() == PPC::SELECT_CC_VRRC) && 2608 "Unexpected instr type to insert"); 2609 2610 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond 2611 // control-flow pattern. The incoming instruction knows the destination vreg 2612 // to set, the condition code register to branch on, the true/false values to 2613 // select between, and a branch opcode to use. 2614 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 2615 ilist<MachineBasicBlock>::iterator It = BB; 2616 ++It; 2617 2618 // thisMBB: 2619 // ... 2620 // TrueVal = ... 2621 // cmpTY ccX, r1, r2 2622 // bCC copy1MBB 2623 // fallthrough --> copy0MBB 2624 MachineBasicBlock *thisMBB = BB; 2625 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 2626 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 2627 unsigned SelectPred = MI->getOperand(4).getImm(); 2628 BuildMI(BB, TII->get(PPC::BCC)) 2629 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 2630 MachineFunction *F = BB->getParent(); 2631 F->getBasicBlockList().insert(It, copy0MBB); 2632 F->getBasicBlockList().insert(It, sinkMBB); 2633 // Update machine-CFG edges by first adding all successors of the current 2634 // block to the new block which will contain the Phi node for the select. 2635 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 2636 e = BB->succ_end(); i != e; ++i) 2637 sinkMBB->addSuccessor(*i); 2638 // Next, remove all successors of the current block, and add the true 2639 // and fallthrough blocks as its successors. 2640 while(!BB->succ_empty()) 2641 BB->removeSuccessor(BB->succ_begin()); 2642 BB->addSuccessor(copy0MBB); 2643 BB->addSuccessor(sinkMBB); 2644 2645 // copy0MBB: 2646 // %FalseValue = ... 2647 // # fallthrough to sinkMBB 2648 BB = copy0MBB; 2649 2650 // Update machine-CFG edges 2651 BB->addSuccessor(sinkMBB); 2652 2653 // sinkMBB: 2654 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 2655 // ... 2656 BB = sinkMBB; 2657 BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg()) 2658 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 2659 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 2660 2661 delete MI; // The pseudo instruction is gone now. 2662 return BB; 2663} 2664 2665//===----------------------------------------------------------------------===// 2666// Target Optimization Hooks 2667//===----------------------------------------------------------------------===// 2668 2669SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, 2670 DAGCombinerInfo &DCI) const { 2671 TargetMachine &TM = getTargetMachine(); 2672 SelectionDAG &DAG = DCI.DAG; 2673 switch (N->getOpcode()) { 2674 default: break; 2675 case PPCISD::SHL: 2676 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 2677 if (C->getValue() == 0) // 0 << V -> 0. 2678 return N->getOperand(0); 2679 } 2680 break; 2681 case PPCISD::SRL: 2682 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 2683 if (C->getValue() == 0) // 0 >>u V -> 0. 2684 return N->getOperand(0); 2685 } 2686 break; 2687 case PPCISD::SRA: 2688 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 2689 if (C->getValue() == 0 || // 0 >>s V -> 0. 2690 C->isAllOnesValue()) // -1 >>s V -> -1. 2691 return N->getOperand(0); 2692 } 2693 break; 2694 2695 case ISD::SINT_TO_FP: 2696 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 2697 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 2698 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 2699 // We allow the src/dst to be either f32/f64, but the intermediate 2700 // type must be i64. 2701 if (N->getOperand(0).getValueType() == MVT::i64) { 2702 SDOperand Val = N->getOperand(0).getOperand(0); 2703 if (Val.getValueType() == MVT::f32) { 2704 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); 2705 DCI.AddToWorklist(Val.Val); 2706 } 2707 2708 Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val); 2709 DCI.AddToWorklist(Val.Val); 2710 Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val); 2711 DCI.AddToWorklist(Val.Val); 2712 if (N->getValueType(0) == MVT::f32) { 2713 Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val); 2714 DCI.AddToWorklist(Val.Val); 2715 } 2716 return Val; 2717 } else if (N->getOperand(0).getValueType() == MVT::i32) { 2718 // If the intermediate type is i32, we can avoid the load/store here 2719 // too. 2720 } 2721 } 2722 } 2723 break; 2724 case ISD::STORE: 2725 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 2726 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 2727 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 2728 N->getOperand(1).getValueType() == MVT::i32) { 2729 SDOperand Val = N->getOperand(1).getOperand(0); 2730 if (Val.getValueType() == MVT::f32) { 2731 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); 2732 DCI.AddToWorklist(Val.Val); 2733 } 2734 Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val); 2735 DCI.AddToWorklist(Val.Val); 2736 2737 Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val, 2738 N->getOperand(2), N->getOperand(3)); 2739 DCI.AddToWorklist(Val.Val); 2740 return Val; 2741 } 2742 2743 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 2744 if (N->getOperand(1).getOpcode() == ISD::BSWAP && 2745 N->getOperand(1).Val->hasOneUse() && 2746 (N->getOperand(1).getValueType() == MVT::i32 || 2747 N->getOperand(1).getValueType() == MVT::i16)) { 2748 SDOperand BSwapOp = N->getOperand(1).getOperand(0); 2749 // Do an any-extend to 32-bits if this is a half-word input. 2750 if (BSwapOp.getValueType() == MVT::i16) 2751 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp); 2752 2753 return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp, 2754 N->getOperand(2), N->getOperand(3), 2755 DAG.getValueType(N->getOperand(1).getValueType())); 2756 } 2757 break; 2758 case ISD::BSWAP: 2759 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 2760 if (ISD::isNON_EXTLoad(N->getOperand(0).Val) && 2761 N->getOperand(0).hasOneUse() && 2762 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { 2763 SDOperand Load = N->getOperand(0); 2764 LoadSDNode *LD = cast<LoadSDNode>(Load); 2765 // Create the byte-swapping load. 2766 std::vector<MVT::ValueType> VTs; 2767 VTs.push_back(MVT::i32); 2768 VTs.push_back(MVT::Other); 2769 SDOperand SV = DAG.getSrcValue(LD->getSrcValue(), LD->getSrcValueOffset()); 2770 SDOperand Ops[] = { 2771 LD->getChain(), // Chain 2772 LD->getBasePtr(), // Ptr 2773 SV, // SrcValue 2774 DAG.getValueType(N->getValueType(0)) // VT 2775 }; 2776 SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4); 2777 2778 // If this is an i16 load, insert the truncate. 2779 SDOperand ResVal = BSLoad; 2780 if (N->getValueType(0) == MVT::i16) 2781 ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad); 2782 2783 // First, combine the bswap away. This makes the value produced by the 2784 // load dead. 2785 DCI.CombineTo(N, ResVal); 2786 2787 // Next, combine the load away, we give it a bogus result value but a real 2788 // chain result. The result value is dead because the bswap is dead. 2789 DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1)); 2790 2791 // Return N so it doesn't get rechecked! 2792 return SDOperand(N, 0); 2793 } 2794 2795 break; 2796 case PPCISD::VCMP: { 2797 // If a VCMPo node already exists with exactly the same operands as this 2798 // node, use its result instead of this node (VCMPo computes both a CR6 and 2799 // a normal output). 2800 // 2801 if (!N->getOperand(0).hasOneUse() && 2802 !N->getOperand(1).hasOneUse() && 2803 !N->getOperand(2).hasOneUse()) { 2804 2805 // Scan all of the users of the LHS, looking for VCMPo's that match. 2806 SDNode *VCMPoNode = 0; 2807 2808 SDNode *LHSN = N->getOperand(0).Val; 2809 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 2810 UI != E; ++UI) 2811 if ((*UI)->getOpcode() == PPCISD::VCMPo && 2812 (*UI)->getOperand(1) == N->getOperand(1) && 2813 (*UI)->getOperand(2) == N->getOperand(2) && 2814 (*UI)->getOperand(0) == N->getOperand(0)) { 2815 VCMPoNode = *UI; 2816 break; 2817 } 2818 2819 // If there is no VCMPo node, or if the flag value has a single use, don't 2820 // transform this. 2821 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 2822 break; 2823 2824 // Look at the (necessarily single) use of the flag value. If it has a 2825 // chain, this transformation is more complex. Note that multiple things 2826 // could use the value result, which we should ignore. 2827 SDNode *FlagUser = 0; 2828 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 2829 FlagUser == 0; ++UI) { 2830 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 2831 SDNode *User = *UI; 2832 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 2833 if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) { 2834 FlagUser = User; 2835 break; 2836 } 2837 } 2838 } 2839 2840 // If the user is a MFCR instruction, we know this is safe. Otherwise we 2841 // give up for right now. 2842 if (FlagUser->getOpcode() == PPCISD::MFCR) 2843 return SDOperand(VCMPoNode, 0); 2844 } 2845 break; 2846 } 2847 case ISD::BR_CC: { 2848 // If this is a branch on an altivec predicate comparison, lower this so 2849 // that we don't have to do a MFCR: instead, branch directly on CR6. This 2850 // lowering is done pre-legalize, because the legalizer lowers the predicate 2851 // compare down to code that is difficult to reassemble. 2852 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 2853 SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3); 2854 int CompareOpc; 2855 bool isDot; 2856 2857 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 2858 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 2859 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 2860 assert(isDot && "Can't compare against a vector result!"); 2861 2862 // If this is a comparison against something other than 0/1, then we know 2863 // that the condition is never/always true. 2864 unsigned Val = cast<ConstantSDNode>(RHS)->getValue(); 2865 if (Val != 0 && Val != 1) { 2866 if (CC == ISD::SETEQ) // Cond never true, remove branch. 2867 return N->getOperand(0); 2868 // Always !=, turn it into an unconditional branch. 2869 return DAG.getNode(ISD::BR, MVT::Other, 2870 N->getOperand(0), N->getOperand(4)); 2871 } 2872 2873 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 2874 2875 // Create the PPCISD altivec 'dot' comparison node. 2876 std::vector<MVT::ValueType> VTs; 2877 SDOperand Ops[] = { 2878 LHS.getOperand(2), // LHS of compare 2879 LHS.getOperand(3), // RHS of compare 2880 DAG.getConstant(CompareOpc, MVT::i32) 2881 }; 2882 VTs.push_back(LHS.getOperand(2).getValueType()); 2883 VTs.push_back(MVT::Flag); 2884 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); 2885 2886 // Unpack the result based on how the target uses it. 2887 PPC::Predicate CompOpc; 2888 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) { 2889 default: // Can't happen, don't crash on invalid number though. 2890 case 0: // Branch on the value of the EQ bit of CR6. 2891 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 2892 break; 2893 case 1: // Branch on the inverted value of the EQ bit of CR6. 2894 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 2895 break; 2896 case 2: // Branch on the value of the LT bit of CR6. 2897 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 2898 break; 2899 case 3: // Branch on the inverted value of the LT bit of CR6. 2900 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 2901 break; 2902 } 2903 2904 return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0), 2905 DAG.getConstant(CompOpc, MVT::i32), 2906 DAG.getRegister(PPC::CR6, MVT::i32), 2907 N->getOperand(4), CompNode.getValue(1)); 2908 } 2909 break; 2910 } 2911 } 2912 2913 return SDOperand(); 2914} 2915 2916//===----------------------------------------------------------------------===// 2917// Inline Assembly Support 2918//===----------------------------------------------------------------------===// 2919 2920void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 2921 uint64_t Mask, 2922 uint64_t &KnownZero, 2923 uint64_t &KnownOne, 2924 unsigned Depth) const { 2925 KnownZero = 0; 2926 KnownOne = 0; 2927 switch (Op.getOpcode()) { 2928 default: break; 2929 case PPCISD::LBRX: { 2930 // lhbrx is known to have the top bits cleared out. 2931 if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16) 2932 KnownZero = 0xFFFF0000; 2933 break; 2934 } 2935 case ISD::INTRINSIC_WO_CHAIN: { 2936 switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) { 2937 default: break; 2938 case Intrinsic::ppc_altivec_vcmpbfp_p: 2939 case Intrinsic::ppc_altivec_vcmpeqfp_p: 2940 case Intrinsic::ppc_altivec_vcmpequb_p: 2941 case Intrinsic::ppc_altivec_vcmpequh_p: 2942 case Intrinsic::ppc_altivec_vcmpequw_p: 2943 case Intrinsic::ppc_altivec_vcmpgefp_p: 2944 case Intrinsic::ppc_altivec_vcmpgtfp_p: 2945 case Intrinsic::ppc_altivec_vcmpgtsb_p: 2946 case Intrinsic::ppc_altivec_vcmpgtsh_p: 2947 case Intrinsic::ppc_altivec_vcmpgtsw_p: 2948 case Intrinsic::ppc_altivec_vcmpgtub_p: 2949 case Intrinsic::ppc_altivec_vcmpgtuh_p: 2950 case Intrinsic::ppc_altivec_vcmpgtuw_p: 2951 KnownZero = ~1U; // All bits but the low one are known to be zero. 2952 break; 2953 } 2954 } 2955 } 2956} 2957 2958 2959/// getConstraintType - Given a constraint letter, return the type of 2960/// constraint it is for this target. 2961PPCTargetLowering::ConstraintType 2962PPCTargetLowering::getConstraintType(char ConstraintLetter) const { 2963 switch (ConstraintLetter) { 2964 default: break; 2965 case 'b': 2966 case 'r': 2967 case 'f': 2968 case 'v': 2969 case 'y': 2970 return C_RegisterClass; 2971 } 2972 return TargetLowering::getConstraintType(ConstraintLetter); 2973} 2974 2975std::pair<unsigned, const TargetRegisterClass*> 2976PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 2977 MVT::ValueType VT) const { 2978 if (Constraint.size() == 1) { 2979 // GCC RS6000 Constraint Letters 2980 switch (Constraint[0]) { 2981 case 'b': // R1-R31 2982 case 'r': // R0-R31 2983 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) 2984 return std::make_pair(0U, PPC::G8RCRegisterClass); 2985 return std::make_pair(0U, PPC::GPRCRegisterClass); 2986 case 'f': 2987 if (VT == MVT::f32) 2988 return std::make_pair(0U, PPC::F4RCRegisterClass); 2989 else if (VT == MVT::f64) 2990 return std::make_pair(0U, PPC::F8RCRegisterClass); 2991 break; 2992 case 'v': 2993 return std::make_pair(0U, PPC::VRRCRegisterClass); 2994 case 'y': // crrc 2995 return std::make_pair(0U, PPC::CRRCRegisterClass); 2996 } 2997 } 2998 2999 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 3000} 3001 3002 3003// isOperandValidForConstraint 3004SDOperand PPCTargetLowering:: 3005isOperandValidForConstraint(SDOperand Op, char Letter, SelectionDAG &DAG) { 3006 switch (Letter) { 3007 default: break; 3008 case 'I': 3009 case 'J': 3010 case 'K': 3011 case 'L': 3012 case 'M': 3013 case 'N': 3014 case 'O': 3015 case 'P': { 3016 if (!isa<ConstantSDNode>(Op)) return SDOperand(0,0);// Must be an immediate. 3017 unsigned Value = cast<ConstantSDNode>(Op)->getValue(); 3018 switch (Letter) { 3019 default: assert(0 && "Unknown constraint letter!"); 3020 case 'I': // "I" is a signed 16-bit constant. 3021 if ((short)Value == (int)Value) return Op; 3022 break; 3023 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 3024 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 3025 if ((short)Value == 0) return Op; 3026 break; 3027 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 3028 if ((Value >> 16) == 0) return Op; 3029 break; 3030 case 'M': // "M" is a constant that is greater than 31. 3031 if (Value > 31) return Op; 3032 break; 3033 case 'N': // "N" is a positive constant that is an exact power of two. 3034 if ((int)Value > 0 && isPowerOf2_32(Value)) return Op; 3035 break; 3036 case 'O': // "O" is the constant zero. 3037 if (Value == 0) return Op; 3038 break; 3039 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 3040 if ((short)-Value == (int)-Value) return Op; 3041 break; 3042 } 3043 break; 3044 } 3045 } 3046 3047 // Handle standard constraint letters. 3048 return TargetLowering::isOperandValidForConstraint(Op, Letter, DAG); 3049} 3050 3051/// isLegalAddressImmediate - Return true if the integer value can be used 3052/// as the offset of the target addressing mode. 3053bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const { 3054 // PPC allows a sign-extended 16-bit immediate field. 3055 return (V > -(1 << 16) && V < (1 << 16)-1); 3056} 3057 3058bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const { 3059 return TargetLowering::isLegalAddressImmediate(GV); 3060} 3061