PPCISelLowering.cpp revision 2ad9f17fee5d6395cd8db81668853e6dbf94060b
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc");

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 SEXTLOAD but no i8 (or i1) SEXTLOAD.
  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC does not have truncstore for i1.
  setStoreXAction(MVT::i1, Promote);

  // PowerPC has pre-inc loads and stores.
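  // (These correspond to the update forms of the PowerPC memory instructions,
  // e.g. lbzu/lhzu/lwzu/ldu and stbu/sthu/stwu/stdu, which write the computed
  // effective address back into the base register.  Marking ISD::PRE_INC
  // Legal lets the DAG combiner form pre-indexed loads and stores, which
  // getPreIndexedAddressParts() below then accepts or rejects.)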
58 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); 59 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); 60 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); 61 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); 62 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); 63 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); 64 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); 65 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); 66 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); 67 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); 68 69 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 70 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 71 72 // PowerPC has no intrinsics for these particular operations 73 setOperationAction(ISD::MEMMOVE, MVT::Other, Expand); 74 setOperationAction(ISD::MEMSET, MVT::Other, Expand); 75 setOperationAction(ISD::MEMCPY, MVT::Other, Expand); 76 77 // PowerPC has no SREM/UREM instructions 78 setOperationAction(ISD::SREM, MVT::i32, Expand); 79 setOperationAction(ISD::UREM, MVT::i32, Expand); 80 setOperationAction(ISD::SREM, MVT::i64, Expand); 81 setOperationAction(ISD::UREM, MVT::i64, Expand); 82 83 // We don't support sin/cos/sqrt/fmod 84 setOperationAction(ISD::FSIN , MVT::f64, Expand); 85 setOperationAction(ISD::FCOS , MVT::f64, Expand); 86 setOperationAction(ISD::FREM , MVT::f64, Expand); 87 setOperationAction(ISD::FSIN , MVT::f32, Expand); 88 setOperationAction(ISD::FCOS , MVT::f32, Expand); 89 setOperationAction(ISD::FREM , MVT::f32, Expand); 90 91 // If we're enabling GP optimizations, use hardware square root 92 if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) { 93 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 94 setOperationAction(ISD::FSQRT, MVT::f32, Expand); 95 } 96 97 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 98 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 99 100 // PowerPC does not have BSWAP, CTPOP or CTTZ 101 setOperationAction(ISD::BSWAP, MVT::i32 , Expand); 102 setOperationAction(ISD::CTPOP, MVT::i32 , Expand); 103 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 104 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); 105 setOperationAction(ISD::CTPOP, MVT::i64 , Expand); 106 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 107 108 // PowerPC does not have ROTR 109 setOperationAction(ISD::ROTR, MVT::i32 , Expand); 110 111 // PowerPC does not have Select 112 setOperationAction(ISD::SELECT, MVT::i32, Expand); 113 setOperationAction(ISD::SELECT, MVT::i64, Expand); 114 setOperationAction(ISD::SELECT, MVT::f32, Expand); 115 setOperationAction(ISD::SELECT, MVT::f64, Expand); 116 117 // PowerPC wants to turn select_cc of FP into fsel when possible. 118 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 119 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 120 121 // PowerPC wants to optimize integer setcc a bit 122 setOperationAction(ISD::SETCC, MVT::i32, Custom); 123 124 // PowerPC does not have BRCOND which requires SetCC 125 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 126 127 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 128 129 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. 
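  // (There is no direct FPR-to-GPR move on these processors, so the value is
  // rounded into an FPR and the integer word is moved through memory; the
  // emitted sequence is roughly:
  //    fctiwz f0, f1          ; convert to i32, rounding toward zero
  //    stfiwx f0, 0, rTmp     ; store the integer word (rTmp: some address)
  //    lwz    r3, 0(rTmp)     ; reload it into a GPR
  // PPCISD::FCTIWZ and PPCISD::STFIWX below name these operations.)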
130 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 131 132 // PowerPC does not have [U|S]INT_TO_FP 133 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); 134 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); 135 136 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand); 137 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand); 138 setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand); 139 setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand); 140 141 // We cannot sextinreg(i1). Expand to shifts. 142 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 143 144 // Support label based line numbers. 145 setOperationAction(ISD::LOCATION, MVT::Other, Expand); 146 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 147 if (!TM.getSubtarget<PPCSubtarget>().isDarwin()) { 148 setOperationAction(ISD::LABEL, MVT::Other, Expand); 149 } else { 150 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 151 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 152 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 153 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 154 } 155 156 // We want to legalize GlobalAddress and ConstantPool nodes into the 157 // appropriate instructions to materialize the address. 158 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 159 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 160 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 161 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 162 setOperationAction(ISD::ConstantPool, MVT::i64, Custom); 163 setOperationAction(ISD::JumpTable, MVT::i64, Custom); 164 165 // RET must be custom lowered, to meet ABI requirements 166 setOperationAction(ISD::RET , MVT::Other, Custom); 167 168 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 169 setOperationAction(ISD::VASTART , MVT::Other, Custom); 170 171 // Use the default implementation. 172 setOperationAction(ISD::VAARG , MVT::Other, Expand); 173 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 174 setOperationAction(ISD::VAEND , MVT::Other, Expand); 175 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 176 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); 177 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 178 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); 179 180 // We want to custom lower some of our intrinsics. 181 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 182 183 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 184 // They also have instructions for converting between i64 and fp. 185 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 186 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); 187 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 188 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); 189 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 190 191 // FIXME: disable this lowered code. This generates 64-bit register values, 192 // and we don't model the fact that the top part is clobbered by calls. We 193 // need to flag these together so that the value isn't live across a call. 194 //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 195 196 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT 197 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote); 198 } else { 199 // PowerPC does not have FP_TO_UINT on 32-bit implementations. 
200 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 201 } 202 203 if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) { 204 // 64 bit PowerPC implementations can support i64 types directly 205 addRegisterClass(MVT::i64, PPC::G8RCRegisterClass); 206 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or 207 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 208 } else { 209 // 32 bit PowerPC wants to expand i64 shifts itself. 210 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 211 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 212 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 213 } 214 215 if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) { 216 // First set operation action for all vector types to expand. Then we 217 // will selectively turn on ones that can be effectively codegen'd. 218 for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 219 VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) { 220 // add/sub are legal for all supported vector VT's. 221 setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal); 222 setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal); 223 224 // We promote all shuffles to v16i8. 225 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote); 226 AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8); 227 228 // We promote all non-typed operations to v4i32. 229 setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote); 230 AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32); 231 setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote); 232 AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32); 233 setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote); 234 AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32); 235 setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote); 236 AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32); 237 setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote); 238 AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32); 239 setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote); 240 AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32); 241 242 // No other operations are legal. 243 setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand); 244 setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand); 245 setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand); 246 setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand); 247 setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand); 248 setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand); 249 setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand); 250 setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand); 251 setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand); 252 253 setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand); 254 } 255 256 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle 257 // with merges, splats, etc. 
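    // (vperm builds each result byte from the 32-byte concatenation of its
    // two source vectors under the control of a third 16-byte mask vector, so
    // any byte-level shuffle can be expressed with it.  The cheaper fixed
    // patterns -- vsldoi, vmrg[lh][bhw], vsplt[bhw], vpku[hw]um -- are
    // matched first by the PPC::is*ShuffleMask predicates later in this file.)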
258 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); 259 260 setOperationAction(ISD::AND , MVT::v4i32, Legal); 261 setOperationAction(ISD::OR , MVT::v4i32, Legal); 262 setOperationAction(ISD::XOR , MVT::v4i32, Legal); 263 setOperationAction(ISD::LOAD , MVT::v4i32, Legal); 264 setOperationAction(ISD::SELECT, MVT::v4i32, Expand); 265 setOperationAction(ISD::STORE , MVT::v4i32, Legal); 266 267 addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass); 268 addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass); 269 addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass); 270 addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass); 271 272 setOperationAction(ISD::MUL, MVT::v4f32, Legal); 273 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 274 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 275 setOperationAction(ISD::MUL, MVT::v16i8, Custom); 276 277 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); 278 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); 279 280 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); 281 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); 282 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); 283 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 284 } 285 286 setSetCCResultType(MVT::i32); 287 setShiftAmountType(MVT::i32); 288 setSetCCResultContents(ZeroOrOneSetCCResult); 289 290 if (TM.getSubtarget<PPCSubtarget>().isPPC64()) { 291 setStackPointerRegisterToSaveRestore(PPC::X1); 292 setExceptionPointerRegister(PPC::X3); 293 setExceptionSelectorRegister(PPC::X4); 294 } else { 295 setStackPointerRegisterToSaveRestore(PPC::R1); 296 setExceptionPointerRegister(PPC::R3); 297 setExceptionSelectorRegister(PPC::R4); 298 } 299 300 // We have target-specific dag combine patterns for the following nodes: 301 setTargetDAGCombine(ISD::SINT_TO_FP); 302 setTargetDAGCombine(ISD::STORE); 303 setTargetDAGCombine(ISD::BR_CC); 304 setTargetDAGCombine(ISD::BSWAP); 305 306 computeRegisterProperties(); 307} 308 309const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { 310 switch (Opcode) { 311 default: return 0; 312 case PPCISD::FSEL: return "PPCISD::FSEL"; 313 case PPCISD::FCFID: return "PPCISD::FCFID"; 314 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; 315 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; 316 case PPCISD::STFIWX: return "PPCISD::STFIWX"; 317 case PPCISD::VMADDFP: return "PPCISD::VMADDFP"; 318 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP"; 319 case PPCISD::VPERM: return "PPCISD::VPERM"; 320 case PPCISD::Hi: return "PPCISD::Hi"; 321 case PPCISD::Lo: return "PPCISD::Lo"; 322 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 323 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 324 case PPCISD::SRL: return "PPCISD::SRL"; 325 case PPCISD::SRA: return "PPCISD::SRA"; 326 case PPCISD::SHL: return "PPCISD::SHL"; 327 case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32"; 328 case PPCISD::STD_32: return "PPCISD::STD_32"; 329 case PPCISD::CALL: return "PPCISD::CALL"; 330 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 331 case PPCISD::BCTRL: return "PPCISD::BCTRL"; 332 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 333 case PPCISD::MFCR: return "PPCISD::MFCR"; 334 case PPCISD::VCMP: return "PPCISD::VCMP"; 335 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 336 case PPCISD::LBRX: return "PPCISD::LBRX"; 337 case PPCISD::STBRX: return "PPCISD::STBRX"; 338 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 339 } 340} 341 342//===----------------------------------------------------------------------===// 
343// Node matching predicates, for use by the tblgen matching code. 344//===----------------------------------------------------------------------===// 345 346/// isFloatingPointZero - Return true if this is 0.0 or -0.0. 347static bool isFloatingPointZero(SDOperand Op) { 348 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 349 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0); 350 else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) { 351 // Maybe this has already been legalized into the constant pool? 352 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 353 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 354 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0); 355 } 356 return false; 357} 358 359/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 360/// true if Op is undef or if it matches the specified value. 361static bool isConstantOrUndef(SDOperand Op, unsigned Val) { 362 return Op.getOpcode() == ISD::UNDEF || 363 cast<ConstantSDNode>(Op)->getValue() == Val; 364} 365 366/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 367/// VPKUHUM instruction. 368bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) { 369 if (!isUnary) { 370 for (unsigned i = 0; i != 16; ++i) 371 if (!isConstantOrUndef(N->getOperand(i), i*2+1)) 372 return false; 373 } else { 374 for (unsigned i = 0; i != 8; ++i) 375 if (!isConstantOrUndef(N->getOperand(i), i*2+1) || 376 !isConstantOrUndef(N->getOperand(i+8), i*2+1)) 377 return false; 378 } 379 return true; 380} 381 382/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 383/// VPKUWUM instruction. 384bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) { 385 if (!isUnary) { 386 for (unsigned i = 0; i != 16; i += 2) 387 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) || 388 !isConstantOrUndef(N->getOperand(i+1), i*2+3)) 389 return false; 390 } else { 391 for (unsigned i = 0; i != 8; i += 2) 392 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) || 393 !isConstantOrUndef(N->getOperand(i+1), i*2+3) || 394 !isConstantOrUndef(N->getOperand(i+8), i*2+2) || 395 !isConstantOrUndef(N->getOperand(i+9), i*2+3)) 396 return false; 397 } 398 return true; 399} 400 401/// isVMerge - Common function, used to match vmrg* shuffles. 402/// 403static bool isVMerge(SDNode *N, unsigned UnitSize, 404 unsigned LHSStart, unsigned RHSStart) { 405 assert(N->getOpcode() == ISD::BUILD_VECTOR && 406 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!"); 407 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 408 "Unsupported merge size!"); 409 410 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 411 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 412 if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j), 413 LHSStart+j+i*UnitSize) || 414 !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j), 415 RHSStart+j+i*UnitSize)) 416 return false; 417 } 418 return true; 419} 420 421/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 422/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 423bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) { 424 if (!isUnary) 425 return isVMerge(N, UnitSize, 8, 24); 426 return isVMerge(N, UnitSize, 8, 8); 427} 428 429/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 430/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). 
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
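/// For example, the v16i8 constant <1,1,...,1> can be formed with "vspltisb 1"
/// when ByteSize is 1, and the halving check in the body below lets a v4i32
/// splat of 0x01010101 be recognized as the same byte splat.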
522SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 523 SDOperand OpVal(0, 0); 524 525 // If ByteSize of the splat is bigger than the element size of the 526 // build_vector, then we have a case where we are checking for a splat where 527 // multiple elements of the buildvector are folded together into a single 528 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 529 unsigned EltSize = 16/N->getNumOperands(); 530 if (EltSize < ByteSize) { 531 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 532 SDOperand UniquedVals[4]; 533 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 534 535 // See if all of the elements in the buildvector agree across. 536 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 537 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 538 // If the element isn't a constant, bail fully out. 539 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand(); 540 541 542 if (UniquedVals[i&(Multiple-1)].Val == 0) 543 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 544 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 545 return SDOperand(); // no match. 546 } 547 548 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 549 // either constant or undef values that are identical for each chunk. See 550 // if these chunks can form into a larger vspltis*. 551 552 // Check to see if all of the leading entries are either 0 or -1. If 553 // neither, then this won't fit into the immediate field. 554 bool LeadingZero = true; 555 bool LeadingOnes = true; 556 for (unsigned i = 0; i != Multiple-1; ++i) { 557 if (UniquedVals[i].Val == 0) continue; // Must have been undefs. 558 559 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue(); 560 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue(); 561 } 562 // Finally, check the least significant entry. 563 if (LeadingZero) { 564 if (UniquedVals[Multiple-1].Val == 0) 565 return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef 566 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue(); 567 if (Val < 16) 568 return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4) 569 } 570 if (LeadingOnes) { 571 if (UniquedVals[Multiple-1].Val == 0) 572 return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef 573 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended(); 574 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 575 return DAG.getTargetConstant(Val, MVT::i32); 576 } 577 578 return SDOperand(); 579 } 580 581 // Check to see if this buildvec has a single non-undef value in its elements. 582 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 583 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 584 if (OpVal.Val == 0) 585 OpVal = N->getOperand(i); 586 else if (OpVal != N->getOperand(i)) 587 return SDOperand(); 588 } 589 590 if (OpVal.Val == 0) return SDOperand(); // All UNDEF: use implicit def. 
  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
}
static bool isIntS16Immediate(SDOperand Op, short &Imm) {
  return isIntS16Immediate(Op.Val, Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base,
                                            SDOperand &Index,
                                            SelectionDAG &DAG) {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i; let [r+imm] fold it if it can.
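    // A common source of such an OR is address arithmetic where the base was
    // materialized with lis, so its low 16 bits are known zero and OR'ing in
    // a 16-bit offset is the same as adding it.  The general check follows.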
675 676 // If this is an or of disjoint bitfields, we can codegen this as an add 677 // (for better address arithmetic) if the LHS and RHS of the OR are provably 678 // disjoint. 679 uint64_t LHSKnownZero, LHSKnownOne; 680 uint64_t RHSKnownZero, RHSKnownOne; 681 ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); 682 683 if (LHSKnownZero) { 684 ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne); 685 // If all of the bits are known zero on the LHS or RHS, the add won't 686 // carry. 687 if ((LHSKnownZero | RHSKnownZero) == ~0U) { 688 Base = N.getOperand(0); 689 Index = N.getOperand(1); 690 return true; 691 } 692 } 693 } 694 695 return false; 696} 697 698/// Returns true if the address N can be represented by a base register plus 699/// a signed 16-bit displacement [r+imm], and if it is not better 700/// represented as reg+reg. 701bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp, 702 SDOperand &Base, SelectionDAG &DAG){ 703 // If this can be more profitably realized as r+r, fail. 704 if (SelectAddressRegReg(N, Disp, Base, DAG)) 705 return false; 706 707 if (N.getOpcode() == ISD::ADD) { 708 short imm = 0; 709 if (isIntS16Immediate(N.getOperand(1), imm)) { 710 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 711 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 712 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 713 } else { 714 Base = N.getOperand(0); 715 } 716 return true; // [r+i] 717 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 718 // Match LOAD (ADD (X, Lo(G))). 719 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue() 720 && "Cannot handle constant offsets yet!"); 721 Disp = N.getOperand(1).getOperand(0); // The global address. 722 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 723 Disp.getOpcode() == ISD::TargetConstantPool || 724 Disp.getOpcode() == ISD::TargetJumpTable); 725 Base = N.getOperand(0); 726 return true; // [&g+r] 727 } 728 } else if (N.getOpcode() == ISD::OR) { 729 short imm = 0; 730 if (isIntS16Immediate(N.getOperand(1), imm)) { 731 // If this is an or of disjoint bitfields, we can codegen this as an add 732 // (for better address arithmetic) if the LHS and RHS of the OR are 733 // provably disjoint. 734 uint64_t LHSKnownZero, LHSKnownOne; 735 ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); 736 if ((LHSKnownZero|~(unsigned)imm) == ~0U) { 737 // If all of the bits are known zero on the LHS or RHS, the add won't 738 // carry. 739 Base = N.getOperand(0); 740 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 741 return true; 742 } 743 } 744 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 745 // Loading from a constant address. 746 747 // If this address fits entirely in a 16-bit sext immediate field, codegen 748 // this as "d, 0" 749 short Imm; 750 if (isIntS16Immediate(CN, Imm)) { 751 Disp = DAG.getTargetConstant(Imm, CN->getValueType(0)); 752 Base = DAG.getRegister(PPC::R0, CN->getValueType(0)); 753 return true; 754 } 755 756 // Handle 32-bit sext immediates with LIS + addr mode. 757 if (CN->getValueType(0) == MVT::i32 || 758 (int64_t)CN->getValue() == (int)CN->getValue()) { 759 int Addr = (int)CN->getValue(); 760 761 // Otherwise, break this down into an LIS + disp. 762 Disp = DAG.getTargetConstant((short)Addr, MVT::i32); 763 764 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32); 765 unsigned Opc = CN->getValueType(0) == MVT::i32 ? 
PPC::LIS : PPC::LIS8; 766 Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0); 767 return true; 768 } 769 } 770 771 Disp = DAG.getTargetConstant(0, getPointerTy()); 772 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 773 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 774 else 775 Base = N; 776 return true; // [r+0] 777} 778 779/// SelectAddressRegRegOnly - Given the specified addressed, force it to be 780/// represented as an indexed [r+r] operation. 781bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base, 782 SDOperand &Index, 783 SelectionDAG &DAG) { 784 // Check to see if we can easily represent this as an [r+r] address. This 785 // will fail if it thinks that the address is more profitably represented as 786 // reg+imm, e.g. where imm = 0. 787 if (SelectAddressRegReg(N, Base, Index, DAG)) 788 return true; 789 790 // If the operand is an addition, always emit this as [r+r], since this is 791 // better (for code size, and execution, as the memop does the add for free) 792 // than emitting an explicit add. 793 if (N.getOpcode() == ISD::ADD) { 794 Base = N.getOperand(0); 795 Index = N.getOperand(1); 796 return true; 797 } 798 799 // Otherwise, do it the hard way, using R0 as the base register. 800 Base = DAG.getRegister(PPC::R0, N.getValueType()); 801 Index = N; 802 return true; 803} 804 805/// SelectAddressRegImmShift - Returns true if the address N can be 806/// represented by a base register plus a signed 14-bit displacement 807/// [r+imm*4]. Suitable for use by STD and friends. 808bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp, 809 SDOperand &Base, 810 SelectionDAG &DAG) { 811 // If this can be more profitably realized as r+r, fail. 812 if (SelectAddressRegReg(N, Disp, Base, DAG)) 813 return false; 814 815 if (N.getOpcode() == ISD::ADD) { 816 short imm = 0; 817 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 818 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 819 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 820 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 821 } else { 822 Base = N.getOperand(0); 823 } 824 return true; // [r+i] 825 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 826 // Match LOAD (ADD (X, Lo(G))). 827 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue() 828 && "Cannot handle constant offsets yet!"); 829 Disp = N.getOperand(1).getOperand(0); // The global address. 830 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 831 Disp.getOpcode() == ISD::TargetConstantPool || 832 Disp.getOpcode() == ISD::TargetJumpTable); 833 Base = N.getOperand(0); 834 return true; // [&g+r] 835 } 836 } else if (N.getOpcode() == ISD::OR) { 837 short imm = 0; 838 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 839 // If this is an or of disjoint bitfields, we can codegen this as an add 840 // (for better address arithmetic) if the LHS and RHS of the OR are 841 // provably disjoint. 842 uint64_t LHSKnownZero, LHSKnownOne; 843 ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne); 844 if ((LHSKnownZero|~(unsigned)imm) == ~0U) { 845 // If all of the bits are known zero on the LHS or RHS, the add won't 846 // carry. 847 Base = N.getOperand(0); 848 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 849 return true; 850 } 851 } 852 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 853 // Loading from a constant address. 
Verify low two bits are clear. 854 if ((CN->getValue() & 3) == 0) { 855 // If this address fits entirely in a 14-bit sext immediate field, codegen 856 // this as "d, 0" 857 short Imm; 858 if (isIntS16Immediate(CN, Imm)) { 859 Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy()); 860 Base = DAG.getRegister(PPC::R0, CN->getValueType(0)); 861 return true; 862 } 863 864 // Fold the low-part of 32-bit absolute addresses into addr mode. 865 if (CN->getValueType(0) == MVT::i32 || 866 (int64_t)CN->getValue() == (int)CN->getValue()) { 867 int Addr = (int)CN->getValue(); 868 869 // Otherwise, break this down into an LIS + disp. 870 Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32); 871 872 Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32); 873 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 874 Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0); 875 return true; 876 } 877 } 878 } 879 880 Disp = DAG.getTargetConstant(0, getPointerTy()); 881 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 882 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 883 else 884 Base = N; 885 return true; // [r+0] 886} 887 888 889/// getPreIndexedAddressParts - returns true by value, base pointer and 890/// offset pointer and addressing mode by reference if the node's address 891/// can be legally represented as pre-indexed load / store address. 892bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, 893 SDOperand &Offset, 894 ISD::MemIndexedMode &AM, 895 SelectionDAG &DAG) { 896 // Disabled by default for now. 897 if (!EnablePPCPreinc) return false; 898 899 SDOperand Ptr; 900 MVT::ValueType VT; 901 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 902 Ptr = LD->getBasePtr(); 903 VT = LD->getLoadedVT(); 904 905 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 906 ST = ST; 907 Ptr = ST->getBasePtr(); 908 VT = ST->getStoredVT(); 909 } else 910 return false; 911 912 // PowerPC doesn't have preinc load/store instructions for vectors. 913 if (MVT::isVector(VT)) 914 return false; 915 916 // TODO: Check reg+reg first. 917 918 // LDU/STU use reg+imm*4, others use reg+imm. 919 if (VT != MVT::i64) { 920 // reg + imm 921 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG)) 922 return false; 923 } else { 924 // reg + imm * 4. 925 if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG)) 926 return false; 927 } 928 929 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 930 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 931 // sext i32 to i64 when addr mode is r+i. 
932 if (LD->getValueType(0) == MVT::i64 && LD->getLoadedVT() == MVT::i32 && 933 LD->getExtensionType() == ISD::SEXTLOAD && 934 isa<ConstantSDNode>(Offset)) 935 return false; 936 } 937 938 AM = ISD::PRE_INC; 939 return true; 940} 941 942//===----------------------------------------------------------------------===// 943// LowerOperation implementation 944//===----------------------------------------------------------------------===// 945 946static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 947 MVT::ValueType PtrVT = Op.getValueType(); 948 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 949 Constant *C = CP->getConstVal(); 950 SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); 951 SDOperand Zero = DAG.getConstant(0, PtrVT); 952 953 const TargetMachine &TM = DAG.getTarget(); 954 955 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero); 956 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero); 957 958 // If this is a non-darwin platform, we don't support non-static relo models 959 // yet. 960 if (TM.getRelocationModel() == Reloc::Static || 961 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 962 // Generate non-pic code that has direct accesses to the constant pool. 963 // The address of the global is just (hi(&g)+lo(&g)). 964 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 965 } 966 967 if (TM.getRelocationModel() == Reloc::PIC_) { 968 // With PIC, the first instruction is actually "GR+hi(&G)". 969 Hi = DAG.getNode(ISD::ADD, PtrVT, 970 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 971 } 972 973 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 974 return Lo; 975} 976 977static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { 978 MVT::ValueType PtrVT = Op.getValueType(); 979 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 980 SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 981 SDOperand Zero = DAG.getConstant(0, PtrVT); 982 983 const TargetMachine &TM = DAG.getTarget(); 984 985 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero); 986 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero); 987 988 // If this is a non-darwin platform, we don't support non-static relo models 989 // yet. 990 if (TM.getRelocationModel() == Reloc::Static || 991 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 992 // Generate non-pic code that has direct accesses to the constant pool. 993 // The address of the global is just (hi(&g)+lo(&g)). 994 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 995 } 996 997 if (TM.getRelocationModel() == Reloc::PIC_) { 998 // With PIC, the first instruction is actually "GR+hi(&G)". 999 Hi = DAG.getNode(ISD::ADD, PtrVT, 1000 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 1001 } 1002 1003 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1004 return Lo; 1005} 1006 1007static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { 1008 MVT::ValueType PtrVT = Op.getValueType(); 1009 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1010 GlobalValue *GV = GSDN->getGlobal(); 1011 SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); 1012 SDOperand Zero = DAG.getConstant(0, PtrVT); 1013 1014 const TargetMachine &TM = DAG.getTarget(); 1015 1016 SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero); 1017 SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero); 1018 1019 // If this is a non-darwin platform, we don't support non-static relo models 1020 // yet. 
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);

  if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
}

static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}

static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
  return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
                      SV->getOffset());
}

static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
                                       int &VarArgsFrameIndex) {
  // TODO: add description of PPC stack frame format, or at least some docs.
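  // Roughly, for the Darwin ABI targeted here: the stack frame begins with a
  // linkage area of six pointer-sized slots, [SP backchain][CR][LR][3 unused]
  // (24 bytes on 32-bit, 48 on 64-bit -- PPCFrameInfo::getLinkageSize()),
  // followed by the parameter save area.  Integer arguments are passed in
  // R3-R10 (X3-X10 on PPC64), FP arguments in F1-F13, and vector arguments in
  // V2-V13; integer and FP arguments also reserve a slot in the parameter
  // area, which is where varargs and overflow arguments live.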
1100 // 1101 MachineFunction &MF = DAG.getMachineFunction(); 1102 MachineFrameInfo *MFI = MF.getFrameInfo(); 1103 SSARegMap *RegMap = MF.getSSARegMap(); 1104 SmallVector<SDOperand, 8> ArgValues; 1105 SDOperand Root = Op.getOperand(0); 1106 1107 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1108 bool isPPC64 = PtrVT == MVT::i64; 1109 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1110 1111 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64); 1112 1113 static const unsigned GPR_32[] = { // 32-bit registers. 1114 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1115 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1116 }; 1117 static const unsigned GPR_64[] = { // 64-bit registers. 1118 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1119 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1120 }; 1121 static const unsigned FPR[] = { 1122 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1123 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1124 }; 1125 static const unsigned VR[] = { 1126 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1127 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1128 }; 1129 1130 const unsigned Num_GPR_Regs = sizeof(GPR_32)/sizeof(GPR_32[0]); 1131 const unsigned Num_FPR_Regs = sizeof(FPR)/sizeof(FPR[0]); 1132 const unsigned Num_VR_Regs = sizeof( VR)/sizeof( VR[0]); 1133 1134 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1135 1136 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1137 1138 // Add DAG nodes to load the arguments or copy them out of registers. On 1139 // entry to a function on PPC, the arguments start after the linkage area, 1140 // although the first ones are often in registers. 1141 for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { 1142 SDOperand ArgVal; 1143 bool needsLoad = false; 1144 MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); 1145 unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; 1146 unsigned ArgSize = ObjSize; 1147 1148 unsigned CurArgOffset = ArgOffset; 1149 switch (ObjectVT) { 1150 default: assert(0 && "Unhandled argument type!"); 1151 case MVT::i32: 1152 // All int arguments reserve stack space. 1153 ArgOffset += PtrByteSize; 1154 1155 if (GPR_idx != Num_GPR_Regs) { 1156 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); 1157 MF.addLiveIn(GPR[GPR_idx], VReg); 1158 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32); 1159 ++GPR_idx; 1160 } else { 1161 needsLoad = true; 1162 ArgSize = PtrByteSize; 1163 } 1164 break; 1165 case MVT::i64: // PPC64 1166 // All int arguments reserve stack space. 1167 ArgOffset += 8; 1168 1169 if (GPR_idx != Num_GPR_Regs) { 1170 unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass); 1171 MF.addLiveIn(GPR[GPR_idx], VReg); 1172 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1173 ++GPR_idx; 1174 } else { 1175 needsLoad = true; 1176 } 1177 break; 1178 case MVT::f32: 1179 case MVT::f64: 1180 // All FP arguments reserve stack space. 1181 ArgOffset += isPPC64 ? 8 : ObjSize; 1182 1183 // Every 4 bytes of argument space consumes one of the GPRs available for 1184 // argument passing. 
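      // (For example, an f64 passed on a 32-bit target consumes two GPRs even
      // though the value itself arrives in an FPR; skipping those GPRs here
      // keeps the register and stack-offset bookkeeping consistent.)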
1185 if (GPR_idx != Num_GPR_Regs) { 1186 ++GPR_idx; 1187 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 1188 ++GPR_idx; 1189 } 1190 if (FPR_idx != Num_FPR_Regs) { 1191 unsigned VReg; 1192 if (ObjectVT == MVT::f32) 1193 VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass); 1194 else 1195 VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass); 1196 MF.addLiveIn(FPR[FPR_idx], VReg); 1197 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1198 ++FPR_idx; 1199 } else { 1200 needsLoad = true; 1201 } 1202 break; 1203 case MVT::v4f32: 1204 case MVT::v4i32: 1205 case MVT::v8i16: 1206 case MVT::v16i8: 1207 // Note that vector arguments in registers don't reserve stack space. 1208 if (VR_idx != Num_VR_Regs) { 1209 unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass); 1210 MF.addLiveIn(VR[VR_idx], VReg); 1211 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1212 ++VR_idx; 1213 } else { 1214 // This should be simple, but requires getting 16-byte aligned stack 1215 // values. 1216 assert(0 && "Loading VR argument not implemented yet!"); 1217 needsLoad = true; 1218 } 1219 break; 1220 } 1221 1222 // We need to load the argument to a virtual register if we determined above 1223 // that we ran out of physical registers of the appropriate type 1224 if (needsLoad) { 1225 // If the argument is actually used, emit a load from the right stack 1226 // slot. 1227 if (!Op.Val->hasNUsesOfValue(0, ArgNo)) { 1228 int FI = MFI->CreateFixedObject(ObjSize, 1229 CurArgOffset + (ArgSize - ObjSize)); 1230 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); 1231 ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); 1232 } else { 1233 // Don't emit a dead load. 1234 ArgVal = DAG.getNode(ISD::UNDEF, ObjectVT); 1235 } 1236 } 1237 1238 ArgValues.push_back(ArgVal); 1239 } 1240 1241 // If the function takes variable number of arguments, make a frame index for 1242 // the start of the first vararg value... for expansion of llvm.va_start. 1243 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1244 if (isVarArg) { 1245 VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, 1246 ArgOffset); 1247 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1248 // If this function is vararg, store any remaining integer argument regs 1249 // to their spots on the stack so that they may be loaded by deferencing the 1250 // result of va_next. 1251 SmallVector<SDOperand, 8> MemOps; 1252 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 1253 unsigned VReg; 1254 if (isPPC64) 1255 VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass); 1256 else 1257 VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass); 1258 1259 MF.addLiveIn(GPR[GPR_idx], VReg); 1260 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1261 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1262 MemOps.push_back(Store); 1263 // Increment the address by four for the next argument to store 1264 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); 1265 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1266 } 1267 if (!MemOps.empty()) 1268 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size()); 1269 } 1270 1271 ArgValues.push_back(Root); 1272 1273 // Return the new list of results. 
  std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
                                    Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return 0;

  int Addr = C->getValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      (Addr << 6 >> 6) != Addr)
    return 0;  // Top 6 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
}

static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
  // SelectExpr to use to put the arguments in the appropriate registers.
  std::vector<SDOperand> args_to_use;

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.  We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64);

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i) {
    unsigned ArgSize = MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;
    ArgSize = std::max(ArgSize, PtrByteSize);
    NumBytes += ArgSize;
  }

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it's varargs.
  // Because we cannot tell if this is needed on the caller side, we have to
  // conservatively assume that it is needed.  As such, make sure we have at
  // least enough stack space for the caller to store the 8 GPRs.
  NumBytes = std::max(NumBytes, PPCFrameInfo::getMinCallFrameSize(isPPC64));

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, PtrVT));

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDOperand StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64);
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  static const unsigned GPR_32[] = {           // 32-bit registers.
1348 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1349 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1350 }; 1351 static const unsigned GPR_64[] = { // 64-bit registers. 1352 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1353 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1354 }; 1355 static const unsigned FPR[] = { 1356 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1357 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1358 }; 1359 static const unsigned VR[] = { 1360 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1361 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1362 }; 1363 const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]); 1364 const unsigned NumFPRs = sizeof(FPR)/sizeof(FPR[0]); 1365 const unsigned NumVRs = sizeof( VR)/sizeof( VR[0]); 1366 1367 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1368 1369 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 1370 SmallVector<SDOperand, 8> MemOpChains; 1371 for (unsigned i = 0; i != NumOps; ++i) { 1372 SDOperand Arg = Op.getOperand(5+2*i); 1373 1374 // PtrOff will be used to store the current argument to the stack if a 1375 // register cannot be found for it. 1376 SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 1377 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff); 1378 1379 // On PPC64, promote integers to 64-bit values. 1380 if (isPPC64 && Arg.getValueType() == MVT::i32) { 1381 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue(); 1382 unsigned ExtOp = (Flags & 1) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 1383 1384 Arg = DAG.getNode(ExtOp, MVT::i64, Arg); 1385 } 1386 1387 switch (Arg.getValueType()) { 1388 default: assert(0 && "Unexpected ValueType for argument!"); 1389 case MVT::i32: 1390 case MVT::i64: 1391 if (GPR_idx != NumGPRs) { 1392 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 1393 } else { 1394 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1395 } 1396 ArgOffset += PtrByteSize; 1397 break; 1398 case MVT::f32: 1399 case MVT::f64: 1400 if (isVarArg && isPPC64) { 1401 // Float varargs need to be promoted to double. 1402 if (Arg.getValueType() == MVT::f32) 1403 Arg = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Arg); 1404 } 1405 1406 if (FPR_idx != NumFPRs) { 1407 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 1408 1409 if (isVarArg) { 1410 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 1411 MemOpChains.push_back(Store); 1412 1413 // Float varargs are always shadowed in available integer registers 1414 if (GPR_idx != NumGPRs) { 1415 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1416 MemOpChains.push_back(Load.getValue(1)); 1417 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 1418 } 1419 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 1420 SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 1421 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour); 1422 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1423 MemOpChains.push_back(Load.getValue(1)); 1424 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 1425 } 1426 } else { 1427 // If we have any FPRs remaining, we may also have GPRs remaining. 1428 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 1429 // GPRs. 
1430 if (GPR_idx != NumGPRs) 1431 ++GPR_idx; 1432 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64) 1433 ++GPR_idx; 1434 } 1435 } else { 1436 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1437 } 1438 if (isPPC64) 1439 ArgOffset += 8; 1440 else 1441 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 1442 break; 1443 case MVT::v4f32: 1444 case MVT::v4i32: 1445 case MVT::v8i16: 1446 case MVT::v16i8: 1447 assert(!isVarArg && "Don't support passing vectors to varargs yet!"); 1448 assert(VR_idx != NumVRs && 1449 "Don't support passing more than 12 vector args yet!"); 1450 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 1451 break; 1452 } 1453 } 1454 if (!MemOpChains.empty()) 1455 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 1456 &MemOpChains[0], MemOpChains.size()); 1457 1458 // Build a sequence of copy-to-reg nodes chained together with token chain 1459 // and flag operands which copy the outgoing args into the appropriate regs. 1460 SDOperand InFlag; 1461 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1462 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 1463 InFlag); 1464 InFlag = Chain.getValue(1); 1465 } 1466 1467 std::vector<MVT::ValueType> NodeTys; 1468 NodeTys.push_back(MVT::Other); // Returns a chain 1469 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 1470 1471 SmallVector<SDOperand, 8> Ops; 1472 unsigned CallOpc = PPCISD::CALL; 1473 1474 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1475 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1476 // node so that legalize doesn't hack it. 1477 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1478 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType()); 1479 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 1480 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); 1481 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) 1482 // If this is an absolute destination address, use the munged value. 1483 Callee = SDOperand(Dest, 0); 1484 else { 1485 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 1486 // to do the call, we can't use PPCISD::CALL. 1487 SDOperand MTCTROps[] = {Chain, Callee, InFlag}; 1488 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0)); 1489 InFlag = Chain.getValue(1); 1490 1491 // Copy the callee address into R12 on darwin. 1492 Chain = DAG.getCopyToReg(Chain, PPC::R12, Callee, InFlag); 1493 InFlag = Chain.getValue(1); 1494 1495 NodeTys.clear(); 1496 NodeTys.push_back(MVT::Other); 1497 NodeTys.push_back(MVT::Flag); 1498 Ops.push_back(Chain); 1499 CallOpc = PPCISD::BCTRL; 1500 Callee.Val = 0; 1501 } 1502 1503 // If this is a direct call, pass the chain and the callee. 1504 if (Callee.Val) { 1505 Ops.push_back(Chain); 1506 Ops.push_back(Callee); 1507 } 1508 1509 // Add argument registers to the end of the list so that they are known live 1510 // into the call. 
1511 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1512 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1513 RegsToPass[i].second.getValueType())); 1514 1515 if (InFlag.Val) 1516 Ops.push_back(InFlag); 1517 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size()); 1518 InFlag = Chain.getValue(1); 1519 1520 SDOperand ResultVals[3]; 1521 unsigned NumResults = 0; 1522 NodeTys.clear(); 1523 1524 // If the call has results, copy the values out of the ret val registers. 1525 switch (Op.Val->getValueType(0)) { 1526 default: assert(0 && "Unexpected ret value!"); 1527 case MVT::Other: break; 1528 case MVT::i32: 1529 if (Op.Val->getValueType(1) == MVT::i32) { 1530 Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1); 1531 ResultVals[0] = Chain.getValue(0); 1532 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, 1533 Chain.getValue(2)).getValue(1); 1534 ResultVals[1] = Chain.getValue(0); 1535 NumResults = 2; 1536 NodeTys.push_back(MVT::i32); 1537 } else { 1538 Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1); 1539 ResultVals[0] = Chain.getValue(0); 1540 NumResults = 1; 1541 } 1542 NodeTys.push_back(MVT::i32); 1543 break; 1544 case MVT::i64: 1545 Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1); 1546 ResultVals[0] = Chain.getValue(0); 1547 NumResults = 1; 1548 NodeTys.push_back(MVT::i64); 1549 break; 1550 case MVT::f32: 1551 case MVT::f64: 1552 Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0), 1553 InFlag).getValue(1); 1554 ResultVals[0] = Chain.getValue(0); 1555 NumResults = 1; 1556 NodeTys.push_back(Op.Val->getValueType(0)); 1557 break; 1558 case MVT::v4f32: 1559 case MVT::v4i32: 1560 case MVT::v8i16: 1561 case MVT::v16i8: 1562 Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0), 1563 InFlag).getValue(1); 1564 ResultVals[0] = Chain.getValue(0); 1565 NumResults = 1; 1566 NodeTys.push_back(Op.Val->getValueType(0)); 1567 break; 1568 } 1569 1570 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain, 1571 DAG.getConstant(NumBytes, PtrVT)); 1572 NodeTys.push_back(MVT::Other); 1573 1574 // If the function returns void, just return the chain. 1575 if (NumResults == 0) 1576 return Chain; 1577 1578 // Otherwise, merge everything together with a MERGE_VALUES node. 1579 ResultVals[NumResults++] = Chain; 1580 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, 1581 ResultVals, NumResults); 1582 return Res.getValue(Op.ResNo); 1583} 1584 1585static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { 1586 SDOperand Copy; 1587 switch(Op.getNumOperands()) { 1588 default: 1589 assert(0 && "Do not know how to return this many arguments!"); 1590 abort(); 1591 case 1: 1592 return SDOperand(); // ret void is legal 1593 case 3: { 1594 MVT::ValueType ArgVT = Op.getOperand(1).getValueType(); 1595 unsigned ArgReg; 1596 if (ArgVT == MVT::i32) { 1597 ArgReg = PPC::R3; 1598 } else if (ArgVT == MVT::i64) { 1599 ArgReg = PPC::X3; 1600 } else if (MVT::isVector(ArgVT)) { 1601 ArgReg = PPC::V2; 1602 } else { 1603 assert(MVT::isFloatingPoint(ArgVT)); 1604 ArgReg = PPC::F1; 1605 } 1606 1607 Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1), 1608 SDOperand()); 1609 1610 // If we haven't noted the R3/F1 are live out, do so now. 
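// (liveout_empty() is used as a cheap "has this been recorded yet" test;
// this assumes every return block of the function returns its value in the
// same registers, so recording the live-outs once is sufficient.)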
1611 if (DAG.getMachineFunction().liveout_empty()) 1612 DAG.getMachineFunction().addLiveOut(ArgReg); 1613 break; 1614 } 1615 case 5: 1616 Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(3), 1617 SDOperand()); 1618 Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1)); 1619 // If we haven't noted that R3 and R4 are live out, do so now. 1620 if (DAG.getMachineFunction().liveout_empty()) { 1621 DAG.getMachineFunction().addLiveOut(PPC::R3); 1622 DAG.getMachineFunction().addLiveOut(PPC::R4); 1623 } 1624 break; 1625 } 1626 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1)); 1627} 1628 1629static SDOperand LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, 1630 const PPCSubtarget &Subtarget) { 1631 // When we pop the dynamic allocation we need to restore the SP link. 1632 1633 // Get the correct type for pointers. 1634 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1635 1636 // Construct the stack pointer operand. 1637 bool IsPPC64 = Subtarget.isPPC64(); 1638 unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1; 1639 SDOperand StackPtr = DAG.getRegister(SP, PtrVT); 1640 1641 // Get the operands for the STACKRESTORE. 1642 SDOperand Chain = Op.getOperand(0); 1643 SDOperand SaveSP = Op.getOperand(1); 1644 1645 // Load the old link SP. 1646 SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0); 1647 1648 // Restore the stack pointer. 1649 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP); 1650 1651 // Store the old link SP. 1652 return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0); 1653} 1654 1655static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG, 1656 const PPCSubtarget &Subtarget) { 1657 MachineFunction &MF = DAG.getMachineFunction(); 1658 bool IsPPC64 = Subtarget.isPPC64(); 1659 1660 // Get the current frame pointer save index. The users of this index will be 1661 // primarily DYNALLOC instructions. 1662 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1663 int FPSI = FI->getFramePointerSaveIndex(); 1664 1665 // If the frame pointer save index hasn't been defined yet, set it up now. 1666 if (!FPSI) { 1667 // Find out the fixed offset of the frame pointer save area. 1668 int Offset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64); 1669 // Allocate the frame index for the frame pointer save area. 1670 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, Offset); 1671 // Save the result. 1672 FI->setFramePointerSaveIndex(FPSI); 1673 } 1674 1675 // Get the inputs. 1676 SDOperand Chain = Op.getOperand(0); 1677 SDOperand Size = Op.getOperand(1); 1678 1679 // Get the correct type for pointers. 1680 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1681 // Negate the size. 1682 SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT, 1683 DAG.getConstant(0, PtrVT), Size); 1684 // Construct a node for the frame pointer save index. 1685 SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT); 1686 // Build a DYNALLOC node. 1687 SDOperand Ops[3] = { Chain, NegSize, FPSIdx }; 1688 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 1689 return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3); 1690} 1691 1692 1693/// LowerSELECT_CC - Lower floating point select_cc's into an fsel instruction when 1694/// possible. 1695static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { 1696 // Not FP? Not a fsel.
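// Recall that fsel (and therefore PPCISD::FSEL) returns its second operand
// when its first operand is >= 0.0 and its third operand otherwise; every
// case below is phrased in terms of that one primitive.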
1697 if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) || 1698 !MVT::isFloatingPoint(Op.getOperand(2).getValueType())) 1699 return SDOperand(); 1700 1701 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 1702 1703 // Cannot handle SETEQ/SETNE. 1704 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand(); 1705 1706 MVT::ValueType ResVT = Op.getValueType(); 1707 MVT::ValueType CmpVT = Op.getOperand(0).getValueType(); 1708 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 1709 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3); 1710 1711 // If the RHS of the comparison is a 0.0, we don't need to do the 1712 // subtraction at all. 1713 if (isFloatingPointZero(RHS)) 1714 switch (CC) { 1715 default: break; // SETUO etc aren't handled by fsel. 1716 case ISD::SETULT: 1717 case ISD::SETOLT: 1718 case ISD::SETLT: 1719 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 1720 case ISD::SETUGE: 1721 case ISD::SETOGE: 1722 case ISD::SETGE: 1723 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 1724 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 1725 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV); 1726 case ISD::SETUGT: 1727 case ISD::SETOGT: 1728 case ISD::SETGT: 1729 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 1730 case ISD::SETULE: 1731 case ISD::SETOLE: 1732 case ISD::SETLE: 1733 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 1734 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 1735 return DAG.getNode(PPCISD::FSEL, ResVT, 1736 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV); 1737 } 1738 1739 SDOperand Cmp; 1740 switch (CC) { 1741 default: break; // SETUO etc aren't handled by fsel. 1742 case ISD::SETULT: 1743 case ISD::SETOLT: 1744 case ISD::SETLT: 1745 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 1746 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 1747 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 1748 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 1749 case ISD::SETUGE: 1750 case ISD::SETOGE: 1751 case ISD::SETGE: 1752 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 1753 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 1754 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 1755 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 1756 case ISD::SETUGT: 1757 case ISD::SETOGT: 1758 case ISD::SETGT: 1759 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 1760 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 1761 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 1762 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 1763 case ISD::SETULE: 1764 case ISD::SETOLE: 1765 case ISD::SETLE: 1766 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 1767 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 1768 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 1769 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 1770 } 1771 return SDOperand(); 1772} 1773 1774static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 1775 assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType())); 1776 SDOperand Src = Op.getOperand(0); 1777 if (Src.getValueType() == MVT::f32) 1778 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); 1779 1780 SDOperand Tmp; 1781 switch (Op.getValueType()) { 1782 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); 1783 case MVT::i32: 1784 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src); 1785 break; 1786 case MVT::i64: 1787 Tmp = 
DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src); 1788 break; 1789 } 1790 1791 // Convert the FP value to an int value through memory. 1792 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp); 1793 if (Op.getValueType() == MVT::i32) 1794 Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits); 1795 return Bits; 1796} 1797 1798static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 1799 if (Op.getOperand(0).getValueType() == MVT::i64) { 1800 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); 1801 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); 1802 if (Op.getValueType() == MVT::f32) 1803 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); 1804 return FP; 1805 } 1806 1807 assert(Op.getOperand(0).getValueType() == MVT::i32 && 1808 "Unhandled SINT_TO_FP type in custom expander!"); 1809 // Since we only generate this in 64-bit mode, we can take advantage of 1810 // 64-bit registers. In particular, sign extend the input value into the 1811 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 1812 // then lfd it and fcfid it. 1813 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 1814 int FrameIdx = FrameInfo->CreateStackObject(8, 8); 1815 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1816 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 1817 1818 SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, 1819 Op.getOperand(0)); 1820 1821 // STD the extended value into the stack slot. 1822 SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other, 1823 DAG.getEntryNode(), Ext64, FIdx, 1824 DAG.getSrcValue(NULL)); 1825 // Load the value as a double. 1826 SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0); 1827 1828 // FCFID it and return it. 1829 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); 1830 if (Op.getValueType() == MVT::f32) 1831 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP); 1832 return FP; 1833} 1834 1835static SDOperand LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { 1836 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 1837 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!"); 1838 1839 // Expand into a bunch of logical ops. Note that these ops 1840 // depend on the PPC behavior for oversized shift amounts. 1841 SDOperand Lo = Op.getOperand(0); 1842 SDOperand Hi = Op.getOperand(1); 1843 SDOperand Amt = Op.getOperand(2); 1844 1845 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 1846 DAG.getConstant(32, MVT::i32), Amt); 1847 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt); 1848 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1); 1849 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 1850 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 1851 DAG.getConstant(-32U, MVT::i32)); 1852 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5); 1853 SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); 1854 SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt); 1855 SDOperand OutOps[] = { OutLo, OutHi }; 1856 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 1857 OutOps, 2); 1858} 1859 1860static SDOperand LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { 1861 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 1862 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!"); 1863 1864 // Otherwise, expand into a bunch of logical ops. Note that these ops 1865 // depend on the PPC behavior for oversized shift amounts. 
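// Concretely, for a 64-bit logical shift right by Amt this computes
//   OutLo = (Lo >>u Amt) | (Hi << (32 - Amt)) | (Hi >>u (Amt - 32))
//   OutHi =  Hi >>u Amt
// where each 32-bit shift produces zero once its (six-bit) amount falls in
// [32, 63], so the terms that do not apply for a given Amt contribute nothing.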
1866 SDOperand Lo = Op.getOperand(0); 1867 SDOperand Hi = Op.getOperand(1); 1868 SDOperand Amt = Op.getOperand(2); 1869 1870 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 1871 DAG.getConstant(32, MVT::i32), Amt); 1872 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); 1873 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); 1874 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 1875 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 1876 DAG.getConstant(-32U, MVT::i32)); 1877 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5); 1878 SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6); 1879 SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt); 1880 SDOperand OutOps[] = { OutLo, OutHi }; 1881 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 1882 OutOps, 2); 1883} 1884 1885static SDOperand LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { 1886 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 && 1887 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!"); 1888 1889 // Otherwise, expand into a bunch of logical ops, followed by a select_cc. 1890 SDOperand Lo = Op.getOperand(0); 1891 SDOperand Hi = Op.getOperand(1); 1892 SDOperand Amt = Op.getOperand(2); 1893 1894 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32, 1895 DAG.getConstant(32, MVT::i32), Amt); 1896 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt); 1897 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1); 1898 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3); 1899 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt, 1900 DAG.getConstant(-32U, MVT::i32)); 1901 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5); 1902 SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt); 1903 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32), 1904 Tmp4, Tmp6, ISD::SETLE); 1905 SDOperand OutOps[] = { OutLo, OutHi }; 1906 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32), 1907 OutOps, 2); 1908} 1909 1910//===----------------------------------------------------------------------===// 1911// Vector related lowering. 1912// 1913 1914// If this is a vector of constants or undefs, get the bits. A bit in 1915// UndefBits is set if the corresponding element of the vector is an 1916// ISD::UNDEF value. For undefs, the corresponding VectorBits values are 1917// zero. Return true if this is not an array of constants, false if it is. 1918// 1919static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], 1920 uint64_t UndefBits[2]) { 1921 // Start with zero'd results. 1922 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; 1923 1924 unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); 1925 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 1926 SDOperand OpVal = BV->getOperand(i); 1927 1928 unsigned PartNo = i >= e/2; // In the upper half of the vector? 1929 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t.
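// For example, a v4i32 build_vector <a, b, c, d> ends up as
// VectorBits[0] = (a << 32) | b and VectorBits[1] = (c << 32) | d, i.e.
// element 0 lands in the most significant bits of the first 64-bit piece.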
1930 1931 uint64_t EltBits = 0; 1932 if (OpVal.getOpcode() == ISD::UNDEF) { 1933 uint64_t EltUndefBits = ~0U >> (32-EltBitSize); 1934 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); 1935 continue; 1936 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1937 EltBits = CN->getValue() & (~0U >> (32-EltBitSize)); 1938 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1939 assert(CN->getValueType(0) == MVT::f32 && 1940 "Only one legal FP vector type!"); 1941 EltBits = FloatToBits(CN->getValue()); 1942 } else { 1943 // Nonconstant element. 1944 return true; 1945 } 1946 1947 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); 1948 } 1949 1950 //printf("%llx %llx %llx %llx\n", 1951 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); 1952 return false; 1953} 1954 1955// If this is a splat (repetition) of a value across the whole vector, return 1956// the smallest size that splats it. For example, "0x01010101010101..." is a 1957// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 1958// SplatSize = 1 byte. 1959static bool isConstantSplat(const uint64_t Bits128[2], 1960 const uint64_t Undef128[2], 1961 unsigned &SplatBits, unsigned &SplatUndef, 1962 unsigned &SplatSize) { 1963 1964 // Don't let undefs prevent splats from matching. See if the top 64-bits are 1965 // the same as the lower 64-bits, ignoring undefs. 1966 if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0])) 1967 return false; // Can't be a splat if two pieces don't match. 1968 1969 uint64_t Bits64 = Bits128[0] | Bits128[1]; 1970 uint64_t Undef64 = Undef128[0] & Undef128[1]; 1971 1972 // Check that the top 32-bits are the same as the lower 32-bits, ignoring 1973 // undefs. 1974 if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64)) 1975 return false; // Can't be a splat if two pieces don't match. 1976 1977 uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32); 1978 uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32); 1979 1980 // If the top 16-bits are different than the lower 16-bits, ignoring 1981 // undefs, we have an i32 splat. 1982 if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) { 1983 SplatBits = Bits32; 1984 SplatUndef = Undef32; 1985 SplatSize = 4; 1986 return true; 1987 } 1988 1989 uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16); 1990 uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16); 1991 1992 // If the top 8-bits are different than the lower 8-bits, ignoring 1993 // undefs, we have an i16 splat. 1994 if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) { 1995 SplatBits = Bits16; 1996 SplatUndef = Undef16; 1997 SplatSize = 2; 1998 return true; 1999 } 2000 2001 // Otherwise, we have an 8-bit splat. 2002 SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8); 2003 SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8); 2004 SplatSize = 1; 2005 return true; 2006} 2007 2008/// BuildSplatI - Build a canonical splati of Val with an element size of 2009/// SplatSize. Cast the result to VT. 2010static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, 2011 SelectionDAG &DAG) { 2012 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 2013 2014 static const MVT::ValueType VTys[] = { // canonical VT to use for each size. 2015 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 2016 }; 2017 2018 MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 2019 2020 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 
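// (A splat of -1 sets every bit regardless of the element size, so the
// vspltisb form is equivalent and gives the rest of the lowering a single
// canonical pattern to match.)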
2021 if (Val == -1) 2022 SplatSize = 1; 2023 2024 MVT::ValueType CanonicalVT = VTys[SplatSize-1]; 2025 2026 // Build a canonical splat for this value. 2027 SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT)); 2028 SmallVector<SDOperand, 8> Ops; 2029 Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt); 2030 SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, 2031 &Ops[0], Ops.size()); 2032 return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); 2033} 2034 2035/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 2036/// specified intrinsic ID. 2037static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, 2038 SelectionDAG &DAG, 2039 MVT::ValueType DestVT = MVT::Other) { 2040 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 2041 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 2042 DAG.getConstant(IID, MVT::i32), LHS, RHS); 2043} 2044 2045/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 2046/// specified intrinsic ID. 2047static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, 2048 SDOperand Op2, SelectionDAG &DAG, 2049 MVT::ValueType DestVT = MVT::Other) { 2050 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 2051 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 2052 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 2053} 2054 2055 2056/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 2057/// amount. The result has the specified value type. 2058static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, 2059 MVT::ValueType VT, SelectionDAG &DAG) { 2060 // Force LHS/RHS to be the right type. 2061 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); 2062 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); 2063 2064 SDOperand Ops[16]; 2065 for (unsigned i = 0; i != 16; ++i) 2066 Ops[i] = DAG.getConstant(i+Amt, MVT::i32); 2067 SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, 2068 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16)); 2069 return DAG.getNode(ISD::BIT_CONVERT, VT, T); 2070} 2071 2072// If this is a case we can't handle, return null and let the default 2073// expansion code take care of it. If we CAN select this case, and if it 2074// selects to a single instruction, return Op. Otherwise, if we can codegen 2075// this case more efficiently than a constant pool load, lower it to the 2076// sequence of ops that should be used. 2077static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2078 // If this is a vector of constants or undefs, get the bits. A bit in 2079 // UndefBits is set if the corresponding element of the vector is an 2080 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are 2081 // zero. 2082 uint64_t VectorBits[2]; 2083 uint64_t UndefBits[2]; 2084 if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)) 2085 return SDOperand(); // Not a constant vector. 2086 2087 // If this is a splat (repetition) of a value across the whole vector, return 2088 // the smallest size that splats it. For example, "0x01010101010101..." is a 2089 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 2090 // SplatSize = 1 byte. 2091 unsigned SplatBits, SplatUndef, SplatSize; 2092 if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){ 2093 bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0; 2094 2095 // First, handle single instruction cases. 2096 2097 // All zeros? 
2098 if (SplatBits == 0) { 2099 // Canonicalize all zero vectors to be v4i32. 2100 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 2101 SDOperand Z = DAG.getConstant(0, MVT::i32); 2102 Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z); 2103 Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z); 2104 } 2105 return Op; 2106 } 2107 2108 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 2109 int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize); 2110 if (SextVal >= -16 && SextVal <= 15) 2111 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG); 2112 2113 2114 // Two instruction sequences. 2115 2116 // If this value is in the range [-32,30] and is even, use: 2117 // tmp = VSPLTI[bhw], result = add tmp, tmp 2118 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { 2119 Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG); 2120 return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op); 2121 } 2122 2123 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 2124 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 2125 // for fneg/fabs. 2126 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 2127 // Make -1 and vspltisw -1: 2128 SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG); 2129 2130 // Make the VSLW intrinsic, computing 0x8000_0000. 2131 SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 2132 OnesV, DAG); 2133 2134 // xor by OnesV to invert it. 2135 Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV); 2136 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2137 } 2138 2139 // Check to see if this is a wide variety of vsplti*, binop self cases. 2140 unsigned SplatBitSize = SplatSize*8; 2141 static const char SplatCsts[] = { 2142 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 2143 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 2144 }; 2145 2146 for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){ 2147 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 2148 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 2149 int i = SplatCsts[idx]; 2150 2151 // Figure out what shift amount will be used by altivec if shifted by i in 2152 // this splat size. 2153 unsigned TypeShiftAmt = i & (SplatBitSize-1); 2154 2155 // vsplti + shl self. 2156 if (SextVal == (i << (int)TypeShiftAmt)) { 2157 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 2158 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2159 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 2160 Intrinsic::ppc_altivec_vslw 2161 }; 2162 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 2163 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2164 } 2165 2166 // vsplti + srl self. 2167 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 2168 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 2169 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2170 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 2171 Intrinsic::ppc_altivec_vsrw 2172 }; 2173 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 2174 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2175 } 2176 2177 // vsplti + sra self. 2178 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 2179 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 2180 static const unsigned IIDs[] = { // Intrinsic to use for each size. 
2181 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 2182 Intrinsic::ppc_altivec_vsraw 2183 }; 2184 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 2185 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2186 } 2187 2188 // vsplti + rol self. 2189 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 2190 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 2191 SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 2192 static const unsigned IIDs[] = { // Intrinsic to use for each size. 2193 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 2194 Intrinsic::ppc_altivec_vrlw 2195 }; 2196 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 2197 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2198 } 2199 2200 // t = vsplti c, result = vsldoi t, t, 1 2201 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) { 2202 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2203 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG); 2204 } 2205 // t = vsplti c, result = vsldoi t, t, 2 2206 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) { 2207 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2208 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG); 2209 } 2210 // t = vsplti c, result = vsldoi t, t, 3 2211 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) { 2212 SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 2213 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG); 2214 } 2215 } 2216 2217 // Three instruction sequences. 2218 2219 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 2220 if (SextVal >= 0 && SextVal <= 31) { 2221 SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG); 2222 SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); 2223 LHS = DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS); 2224 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); 2225 } 2226 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 2227 if (SextVal >= -31 && SextVal <= 0) { 2228 SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG); 2229 SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); 2230 LHS = DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS); 2231 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); 2232 } 2233 } 2234 2235 return SDOperand(); 2236} 2237 2238/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 2239/// the specified operations to build the shuffle. 
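/// Each PFEntry, as decoded below, packs the cost of the sequence in bits
/// [31:30], the operation to perform in bits [29:26], and the perfect-shuffle
/// table indices of its two operands in bits [25:13] and [12:0].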
2240static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, 2241 SDOperand RHS, SelectionDAG &DAG) { 2242 unsigned OpNum = (PFEntry >> 26) & 0x0F; 2243 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 2244 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 2245 2246 enum { 2247 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 2248 OP_VMRGHW, 2249 OP_VMRGLW, 2250 OP_VSPLTISW0, 2251 OP_VSPLTISW1, 2252 OP_VSPLTISW2, 2253 OP_VSPLTISW3, 2254 OP_VSLDOI4, 2255 OP_VSLDOI8, 2256 OP_VSLDOI12 2257 }; 2258 2259 if (OpNum == OP_COPY) { 2260 if (LHSID == (1*9+2)*9+3) return LHS; 2261 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 2262 return RHS; 2263 } 2264 2265 SDOperand OpLHS, OpRHS; 2266 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG); 2267 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG); 2268 2269 unsigned ShufIdxs[16]; 2270 switch (OpNum) { 2271 default: assert(0 && "Unknown i32 permute!"); 2272 case OP_VMRGHW: 2273 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 2274 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 2275 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 2276 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 2277 break; 2278 case OP_VMRGLW: 2279 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 2280 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 2281 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 2282 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 2283 break; 2284 case OP_VSPLTISW0: 2285 for (unsigned i = 0; i != 16; ++i) 2286 ShufIdxs[i] = (i&3)+0; 2287 break; 2288 case OP_VSPLTISW1: 2289 for (unsigned i = 0; i != 16; ++i) 2290 ShufIdxs[i] = (i&3)+4; 2291 break; 2292 case OP_VSPLTISW2: 2293 for (unsigned i = 0; i != 16; ++i) 2294 ShufIdxs[i] = (i&3)+8; 2295 break; 2296 case OP_VSPLTISW3: 2297 for (unsigned i = 0; i != 16; ++i) 2298 ShufIdxs[i] = (i&3)+12; 2299 break; 2300 case OP_VSLDOI4: 2301 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG); 2302 case OP_VSLDOI8: 2303 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG); 2304 case OP_VSLDOI12: 2305 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG); 2306 } 2307 SDOperand Ops[16]; 2308 for (unsigned i = 0; i != 16; ++i) 2309 Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i32); 2310 2311 return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS, 2312 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 2313} 2314 2315/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 2316/// is a shuffle we can handle in a single instruction, return it. Otherwise, 2317/// return the code it can be lowered into. Worst case, it can always be 2318/// lowered into a vperm. 2319static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { 2320 SDOperand V1 = Op.getOperand(0); 2321 SDOperand V2 = Op.getOperand(1); 2322 SDOperand PermMask = Op.getOperand(2); 2323 2324 // Cases that are handled by instructions that take permute immediates 2325 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 2326 // selected by the instruction selector. 
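// (With an undef second input these are the vsplt*, vpkuwum/vpkuhum, vsldoi
// and vmrgl/vmrgh style masks tested immediately below.)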
2327 if (V2.getOpcode() == ISD::UNDEF) { 2328 if (PPC::isSplatShuffleMask(PermMask.Val, 1) || 2329 PPC::isSplatShuffleMask(PermMask.Val, 2) || 2330 PPC::isSplatShuffleMask(PermMask.Val, 4) || 2331 PPC::isVPKUWUMShuffleMask(PermMask.Val, true) || 2332 PPC::isVPKUHUMShuffleMask(PermMask.Val, true) || 2333 PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 || 2334 PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) || 2335 PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) || 2336 PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) || 2337 PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) || 2338 PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) || 2339 PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) { 2340 return Op; 2341 } 2342 } 2343 2344 // Altivec has a variety of "shuffle immediates" that take two vector inputs 2345 // and produce a fixed permutation. If any of these match, do not lower to 2346 // VPERM. 2347 if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) || 2348 PPC::isVPKUHUMShuffleMask(PermMask.Val, false) || 2349 PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 || 2350 PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) || 2351 PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) || 2352 PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) || 2353 PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) || 2354 PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) || 2355 PPC::isVMRGHShuffleMask(PermMask.Val, 4, false)) 2356 return Op; 2357 2358 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 2359 // perfect shuffle table to emit an optimal matching sequence. 2360 unsigned PFIndexes[4]; 2361 bool isFourElementShuffle = true; 2362 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 2363 unsigned EltNo = 8; // Start out undef. 2364 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 2365 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF) 2366 continue; // Undef, ignore it. 2367 2368 unsigned ByteSource = 2369 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue(); 2370 if ((ByteSource & 3) != j) { 2371 isFourElementShuffle = false; 2372 break; 2373 } 2374 2375 if (EltNo == 8) { 2376 EltNo = ByteSource/4; 2377 } else if (EltNo != ByteSource/4) { 2378 isFourElementShuffle = false; 2379 break; 2380 } 2381 } 2382 PFIndexes[i] = EltNo; 2383 } 2384 2385 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 2386 // perfect shuffle vector to determine if it is cost effective to do this as 2387 // discrete instructions, or whether we should use a vperm. 2388 if (isFourElementShuffle) { 2389 // Compute the index in the perfect shuffle table. 2390 unsigned PFTableIndex = 2391 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 2392 2393 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 2394 unsigned Cost = (PFEntry >> 30); 2395 2396 // Determining when to avoid vperm is tricky. Many things affect the cost 2397 // of vperm, particularly how many times the perm mask needs to be computed. 2398 // For example, if the perm mask can be hoisted out of a loop or is already 2399 // used (perhaps because there are multiple permutes with the same shuffle 2400 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 2401 // the loop requires an extra register. 2402 // 2403 // As a compromise, we only emit discrete instructions if the shuffle can be 2404 // generated in 3 or fewer operations. 
When we have loop information 2405 // available, if this block is within a loop, we should avoid using vperm 2406 // for 3-operation perms and use a constant pool load instead. 2407 if (Cost < 3) 2408 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG); 2409 } 2410 2411 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 2412 // vector that will get spilled to the constant pool. 2413 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 2414 2415 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 2416 // that it is in input element units, not in bytes. Convert now. 2417 MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType()); 2418 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8; 2419 2420 SmallVector<SDOperand, 16> ResultMask; 2421 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { 2422 unsigned SrcElt; 2423 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) 2424 SrcElt = 0; 2425 else 2426 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue(); 2427 2428 for (unsigned j = 0; j != BytesPerElement; ++j) 2429 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 2430 MVT::i8)); 2431 } 2432 2433 SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, 2434 &ResultMask[0], ResultMask.size()); 2435 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); 2436} 2437 2438/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 2439/// altivec comparison. If it is, return true and fill in Opc/isDot with 2440/// information about the intrinsic. 2441static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc, 2442 bool &isDot) { 2443 unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue(); 2444 CompareOpc = -1; 2445 isDot = false; 2446 switch (IntrinsicID) { 2447 default: return false; 2448 // Comparison predicates. 2449 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 2450 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 2451 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 2452 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 2453 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 2454 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 2455 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 2456 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 2457 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 2458 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 2459 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 2460 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 2461 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 2462 2463 // Normal Comparisons. 
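// These forms return the full vector of compare results and do not update CR6.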
2464 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 2465 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 2466 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 2467 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 2468 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 2469 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 2470 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 2471 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 2472 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 2473 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 2474 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 2475 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 2476 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 2477 } 2478 return true; 2479} 2480 2481/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 2482/// lower, do it, otherwise return null. 2483static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { 2484 // If this is a lowered altivec predicate compare, CompareOpc is set to the 2485 // opcode number of the comparison. 2486 int CompareOpc; 2487 bool isDot; 2488 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 2489 return SDOperand(); // Don't custom lower most intrinsics. 2490 2491 // If this is a non-dot comparison, make the VCMP node and we are done. 2492 if (!isDot) { 2493 SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), 2494 Op.getOperand(1), Op.getOperand(2), 2495 DAG.getConstant(CompareOpc, MVT::i32)); 2496 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp); 2497 } 2498 2499 // Create the PPCISD altivec 'dot' comparison node. 2500 SDOperand Ops[] = { 2501 Op.getOperand(2), // LHS 2502 Op.getOperand(3), // RHS 2503 DAG.getConstant(CompareOpc, MVT::i32) 2504 }; 2505 std::vector<MVT::ValueType> VTs; 2506 VTs.push_back(Op.getOperand(2).getValueType()); 2507 VTs.push_back(MVT::Flag); 2508 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); 2509 2510 // Now that we have the comparison, emit a copy from the CR to a GPR. 2511 // This is flagged to the above dot comparison. 2512 SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, 2513 DAG.getRegister(PPC::CR6, MVT::i32), 2514 CompNode.getValue(1)); 2515 2516 // Unpack the result based on how the target uses it. 2517 unsigned BitNo; // Bit # of CR6. 2518 bool InvertBit; // Invert result? 2519 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) { 2520 default: // Can't happen, don't crash on invalid number though. 2521 case 0: // Return the value of the EQ bit of CR6. 2522 BitNo = 0; InvertBit = false; 2523 break; 2524 case 1: // Return the inverted value of the EQ bit of CR6. 2525 BitNo = 0; InvertBit = true; 2526 break; 2527 case 2: // Return the value of the LT bit of CR6. 2528 BitNo = 2; InvertBit = false; 2529 break; 2530 case 3: // Return the inverted value of the LT bit of CR6. 2531 BitNo = 2; InvertBit = true; 2532 break; 2533 } 2534 2535 // Shift the bit into the low position. 2536 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags, 2537 DAG.getConstant(8-(3-BitNo), MVT::i32)); 2538 // Isolate the bit. 
2539 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags, 2540 DAG.getConstant(1, MVT::i32)); 2541 2542 // If we are supposed to, toggle the bit. 2543 if (InvertBit) 2544 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags, 2545 DAG.getConstant(1, MVT::i32)); 2546 return Flags; 2547} 2548 2549static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { 2550 // Create a stack slot that is 16-byte aligned. 2551 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 2552 int FrameIdx = FrameInfo->CreateStackObject(16, 16); 2553 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2554 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 2555 2556 // Store the input value into Value#0 of the stack slot. 2557 SDOperand Store = DAG.getStore(DAG.getEntryNode(), 2558 Op.getOperand(0), FIdx, NULL, 0); 2559 // Load it out. 2560 return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0); 2561} 2562 2563static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) { 2564 if (Op.getValueType() == MVT::v4i32) { 2565 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2566 2567 SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG); 2568 SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt. 2569 2570 SDOperand RHSSwap = // = vrlw RHS, 16 2571 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG); 2572 2573 // Shrinkify inputs to v8i16. 2574 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS); 2575 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS); 2576 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap); 2577 2578 // Low parts multiplied together, generating 32-bit results (we ignore the 2579 // top parts). 2580 SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 2581 LHS, RHS, DAG, MVT::v4i32); 2582 2583 SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 2584 LHS, RHSSwap, Zero, DAG, MVT::v4i32); 2585 // Shift the high parts up 16 bits. 2586 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG); 2587 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd); 2588 } else if (Op.getValueType() == MVT::v8i16) { 2589 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2590 2591 SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG); 2592 2593 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 2594 LHS, RHS, Zero, DAG); 2595 } else if (Op.getValueType() == MVT::v16i8) { 2596 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2597 2598 // Multiply the even 8-bit parts, producing 16-bit sums. 2599 SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 2600 LHS, RHS, DAG, MVT::v8i16); 2601 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts); 2602 2603 // Multiply the odd 8-bit parts, producing 16-bit sums. 2604 SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 2605 LHS, RHS, DAG, MVT::v8i16); 2606 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts); 2607 2608 // Merge the results together. 2609 SDOperand Ops[16]; 2610 for (unsigned i = 0; i != 8; ++i) { 2611 Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8); 2612 Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8); 2613 } 2614 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts, 2615 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 2616 } else { 2617 assert(0 && "Unknown mul to lower!"); 2618 abort(); 2619 } 2620} 2621 2622/// LowerOperation - Provide custom lowering hooks for some operations. 
2623/// 2624SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { 2625 switch (Op.getOpcode()) { 2626 default: assert(0 && "Wasn't expecting to be able to lower this!"); 2627 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 2628 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 2629 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 2630 case ISD::SETCC: return LowerSETCC(Op, DAG); 2631 case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex); 2632 case ISD::FORMAL_ARGUMENTS: 2633 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex); 2634 case ISD::CALL: return LowerCALL(Op, DAG); 2635 case ISD::RET: return LowerRET(Op, DAG); 2636 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 2637 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG, 2638 PPCSubTarget); 2639 2640 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 2641 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 2642 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 2643 2644 // Lower 64-bit shifts. 2645 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 2646 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 2647 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 2648 2649 // Vector-related lowering. 2650 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 2651 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 2652 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 2653 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 2654 case ISD::MUL: return LowerMUL(Op, DAG); 2655 2656 // Frame & Return address. Currently unimplemented 2657 case ISD::RETURNADDR: break; 2658 case ISD::FRAMEADDR: break; 2659 } 2660 return SDOperand(); 2661} 2662 2663//===----------------------------------------------------------------------===// 2664// Other Lowering Code 2665//===----------------------------------------------------------------------===// 2666 2667MachineBasicBlock * 2668PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, 2669 MachineBasicBlock *BB) { 2670 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 2671 assert((MI->getOpcode() == PPC::SELECT_CC_I4 || 2672 MI->getOpcode() == PPC::SELECT_CC_I8 || 2673 MI->getOpcode() == PPC::SELECT_CC_F4 || 2674 MI->getOpcode() == PPC::SELECT_CC_F8 || 2675 MI->getOpcode() == PPC::SELECT_CC_VRRC) && 2676 "Unexpected instr type to insert"); 2677 2678 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond 2679 // control-flow pattern. The incoming instruction knows the destination vreg 2680 // to set, the condition code register to branch on, the true/false values to 2681 // select between, and a branch opcode to use. 2682 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 2683 ilist<MachineBasicBlock>::iterator It = BB; 2684 ++It; 2685 2686 // thisMBB: 2687 // ... 2688 // TrueVal = ... 
2689 // cmpTY ccX, r1, r2 2690 // bCC copy1MBB 2691 // fallthrough --> copy0MBB 2692 MachineBasicBlock *thisMBB = BB; 2693 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB); 2694 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB); 2695 unsigned SelectPred = MI->getOperand(4).getImm(); 2696 BuildMI(BB, TII->get(PPC::BCC)) 2697 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 2698 MachineFunction *F = BB->getParent(); 2699 F->getBasicBlockList().insert(It, copy0MBB); 2700 F->getBasicBlockList().insert(It, sinkMBB); 2701 // Update machine-CFG edges by first adding all successors of the current 2702 // block to the new block which will contain the Phi node for the select. 2703 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(), 2704 e = BB->succ_end(); i != e; ++i) 2705 sinkMBB->addSuccessor(*i); 2706 // Next, remove all successors of the current block, and add the true 2707 // and fallthrough blocks as its successors. 2708 while(!BB->succ_empty()) 2709 BB->removeSuccessor(BB->succ_begin()); 2710 BB->addSuccessor(copy0MBB); 2711 BB->addSuccessor(sinkMBB); 2712 2713 // copy0MBB: 2714 // %FalseValue = ... 2715 // # fallthrough to sinkMBB 2716 BB = copy0MBB; 2717 2718 // Update machine-CFG edges 2719 BB->addSuccessor(sinkMBB); 2720 2721 // sinkMBB: 2722 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 2723 // ... 2724 BB = sinkMBB; 2725 BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg()) 2726 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 2727 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 2728 2729 delete MI; // The pseudo instruction is gone now. 2730 return BB; 2731} 2732 2733//===----------------------------------------------------------------------===// 2734// Target Optimization Hooks 2735//===----------------------------------------------------------------------===// 2736 2737SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, 2738 DAGCombinerInfo &DCI) const { 2739 TargetMachine &TM = getTargetMachine(); 2740 SelectionDAG &DAG = DCI.DAG; 2741 switch (N->getOpcode()) { 2742 default: break; 2743 case PPCISD::SHL: 2744 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 2745 if (C->getValue() == 0) // 0 << V -> 0. 2746 return N->getOperand(0); 2747 } 2748 break; 2749 case PPCISD::SRL: 2750 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 2751 if (C->getValue() == 0) // 0 >>u V -> 0. 2752 return N->getOperand(0); 2753 } 2754 break; 2755 case PPCISD::SRA: 2756 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 2757 if (C->getValue() == 0 || // 0 >>s V -> 0. 2758 C->isAllOnesValue()) // -1 >>s V -> -1. 2759 return N->getOperand(0); 2760 } 2761 break; 2762 2763 case ISD::SINT_TO_FP: 2764 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 2765 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 2766 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 2767 // We allow the src/dst to be either f32/f64, but the intermediate 2768 // type must be i64. 
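// For example, (f64 (sint_to_fp (i64 (fp_to_sint f64:X)))) becomes
// fcfid(fctidz(X)), with an fp_extend or fp_round wrapped around it when
// either end of the conversion is f32.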
2769 if (N->getOperand(0).getValueType() == MVT::i64) { 2770 SDOperand Val = N->getOperand(0).getOperand(0); 2771 if (Val.getValueType() == MVT::f32) { 2772 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); 2773 DCI.AddToWorklist(Val.Val); 2774 } 2775 2776 Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val); 2777 DCI.AddToWorklist(Val.Val); 2778 Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val); 2779 DCI.AddToWorklist(Val.Val); 2780 if (N->getValueType(0) == MVT::f32) { 2781 Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val); 2782 DCI.AddToWorklist(Val.Val); 2783 } 2784 return Val; 2785 } else if (N->getOperand(0).getValueType() == MVT::i32) { 2786 // If the intermediate type is i32, we can avoid the load/store here 2787 // too. 2788 } 2789 } 2790 } 2791 break; 2792 case ISD::STORE: 2793 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 2794 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 2795 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 2796 N->getOperand(1).getValueType() == MVT::i32) { 2797 SDOperand Val = N->getOperand(1).getOperand(0); 2798 if (Val.getValueType() == MVT::f32) { 2799 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); 2800 DCI.AddToWorklist(Val.Val); 2801 } 2802 Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val); 2803 DCI.AddToWorklist(Val.Val); 2804 2805 Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val, 2806 N->getOperand(2), N->getOperand(3)); 2807 DCI.AddToWorklist(Val.Val); 2808 return Val; 2809 } 2810 2811 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 2812 if (N->getOperand(1).getOpcode() == ISD::BSWAP && 2813 N->getOperand(1).Val->hasOneUse() && 2814 (N->getOperand(1).getValueType() == MVT::i32 || 2815 N->getOperand(1).getValueType() == MVT::i16)) { 2816 SDOperand BSwapOp = N->getOperand(1).getOperand(0); 2817 // Do an any-extend to 32-bits if this is a half-word input. 2818 if (BSwapOp.getValueType() == MVT::i16) 2819 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp); 2820 2821 return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp, 2822 N->getOperand(2), N->getOperand(3), 2823 DAG.getValueType(N->getOperand(1).getValueType())); 2824 } 2825 break; 2826 case ISD::BSWAP: 2827 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 2828 if (ISD::isNON_EXTLoad(N->getOperand(0).Val) && 2829 N->getOperand(0).hasOneUse() && 2830 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { 2831 SDOperand Load = N->getOperand(0); 2832 LoadSDNode *LD = cast<LoadSDNode>(Load); 2833 // Create the byte-swapping load. 2834 std::vector<MVT::ValueType> VTs; 2835 VTs.push_back(MVT::i32); 2836 VTs.push_back(MVT::Other); 2837 SDOperand SV = DAG.getSrcValue(LD->getSrcValue(), LD->getSrcValueOffset()); 2838 SDOperand Ops[] = { 2839 LD->getChain(), // Chain 2840 LD->getBasePtr(), // Ptr 2841 SV, // SrcValue 2842 DAG.getValueType(N->getValueType(0)) // VT 2843 }; 2844 SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4); 2845 2846 // If this is an i16 load, insert the truncate. 2847 SDOperand ResVal = BSLoad; 2848 if (N->getValueType(0) == MVT::i16) 2849 ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad); 2850 2851 // First, combine the bswap away. This makes the value produced by the 2852 // load dead. 2853 DCI.CombineTo(N, ResVal); 2854 2855 // Next, combine the load away, we give it a bogus result value but a real 2856 // chain result. The result value is dead because the bswap is dead. 2857 DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1)); 2858 2859 // Return N so it doesn't get rechecked! 
      return SDOperand(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    //
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if ((*UI)->getOpcode() == PPCISD::VCMPo &&
            (*UI)->getOperand(1) == N->getOperand(1) &&
            (*UI)->getOperand(2) == N->getOperand(2) &&
            (*UI)->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if its flag value is unused, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is an MFCR instruction, we know this is safe.  Otherwise we
      // give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do an MFCR: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
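      // VCMPo produces two results: the vector compare value and a Flag that
      // stands for CR6; the flag result (value 1) is what feeds the CR6-based
      // conditional branch built below.  For example, a vec_any_eq-style
      // predicate typically ends up as a vcmpequw. followed by a branch on a
      // CR6 bit, instead of an mfcr/compare/branch sequence.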
      std::vector<MVT::ValueType> VTs;
      SDOperand Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
      VTs.push_back(LHS.getOperand(2).getValueType());
      VTs.push_back(MVT::Flag);
      SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                        uint64_t Mask,
                                                        uint64_t &KnownZero,
                                                        uint64_t &KnownOne,
                                                        unsigned Depth) const {
  KnownZero = 0;
  KnownOne = 0;
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}


/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
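/// The single-letter register constraints handled below follow the GCC RS6000
/// conventions: 'b' (base registers), 'r' (general-purpose registers),
/// 'f' (floating-point registers), 'v' (AltiVec vector registers) and
/// 'y' (condition registers); anything else is deferred to the generic
/// TargetLowering handling.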
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
  switch (ConstraintLetter) {
  default: break;
  case 'b':
  case 'r':
  case 'f':
  case 'v':
  case 'y':
    return C_RegisterClass;
  }
  return TargetLowering::getConstraintType(ConstraintLetter);
}

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, PPC::G8RCRegisterClass);
      return std::make_pair(0U, PPC::GPRCRegisterClass);
    case 'f':
      if (VT == MVT::f32)
        return std::make_pair(0U, PPC::F4RCRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, PPC::F8RCRegisterClass);
      break;
    case 'v':
      return std::make_pair(0U, PPC::VRRCRegisterClass);
    case 'y':   // crrc
      return std::make_pair(0U, PPC::CRRCRegisterClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}


/// isOperandValidForConstraint - Check whether Op satisfies the PPC-specific
/// immediate constraint identified by Letter, returning Op on success and
/// deferring unknown letters to the generic TargetLowering handling.
SDOperand PPCTargetLowering::
isOperandValidForConstraint(SDOperand Op, char Letter, SelectionDAG &DAG) {
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    if (!isa<ConstantSDNode>(Op)) return SDOperand(0,0);  // Must be an immediate.
    unsigned Value = cast<ConstantSDNode>(Op)->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value) return Op;
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0) return Op;
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0) return Op;
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31) return Op;
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value)) return Op;
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0) return Op;
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value) return Op;
      break;
    }
    break;
  }
  }

  // Handle standard constraint letters.
  return TargetLowering::isOperandValidForConstraint(Op, Letter, DAG);
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V >= -(1 << 15) && V <= (1 << 15)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return TargetLowering::isLegalAddressImmediate(GV);
}
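// For example, the sign-extended 16-bit range accepted above corresponds to
// the displacement field of D-form memory accesses such as
//   lwz r3, -32768(r4)
//   stw r3, 32767(r4)
// so larger offsets presumably have to be materialized in a register and used
// with an indexed (X-form) access instead of being folded into the address.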