PPCISelLowering.cpp revision c0cb28fd3abee9a8b40856990e04f1af2f9bd7b8
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
cl::desc("enable preincrement load/store generation on PPC (experimental)"),
                                     cl::Hidden);

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 SEXTLOAD, but no i8 (or i1) SEXTLOAD.
  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // Shortening conversions involving ppcf128 get expanded (2 regs -> 1 reg).
  setConvertAction(MVT::ppcf128, MVT::f64, Expand);
  setConvertAction(MVT::ppcf128, MVT::f32, Expand);
  // This is used in the ppcf128->int sequence.  Note it has different
  // semantics from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // PowerPC has no intrinsics for these particular operations.
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Use the hardware square root only when the subtarget has it (it comes
  // with the "GP" optimizations); otherwise expand FSQRT.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
  setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64 , Expand);

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32 , Expand);

  // PowerPC does not have SELECT.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);


  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // RET must be custom lowered, to meet ABI requirements.
  setOperationAction(ISD::RET , MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART , MVT::Other, Custom);

  // VAARG is custom lowered with the ELF 32 ABI.
  if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI())
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
  else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);

    // FIXME: disable this lowered code.  This generates 64-bit register
    // values, and we don't model the fact that the top part is clobbered by
    // calls.  We need to flag these together so that the value isn't live
    // across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand.  Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32);
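      // Promotion here means the operation is re-issued in the promoted type:
      // e.g. (and v16i8 A, B) becomes a bit_convert of A and B to v4i32, a
      // v4i32 AND, and a bit_convert back, since the same vector instruction
      // works regardless of element type.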
      // No other operations are legal.
      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND , MVT::v4i32, Legal);
    setOperationAction(ISD::OR , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setShiftAmountType(MVT::i32);
  setSetCCResultContents(ZeroOrOneSetCCResult);

  if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  // Darwin long double math library functions have $LDBL128 appended.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
  }

  computeRegisterProperties();
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const {
  TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on a 4-byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;
  // FIXME: ELF TBD
  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::DYNALLOC:      return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL_ELF:      return "PPCISD::CALL_ELF";
  case PPCISD::CALL_Macho:    return "PPCISD::CALL_Macho";
  case PPCISD::MTCTR:         return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Macho:   return "PPCISD::BCTRL_Macho";
  case PPCISD::BCTRL_ELF:     return "PPCISD::BCTRL_ELF";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  case PPCISD::LBRX:          return "PPCISD::LBRX";
  case PPCISD::STBRX:         return "PPCISD::STBRX";
  case PPCISD::COND_BRANCH:   return "PPCISD::COND_BRANCH";
  case PPCISD::MFFS:          return "PPCISD::MFFS";
  case PPCISD::MTFSB0:        return "PPCISD::MTFSB0";
  case PPCISD::MTFSB1:        return "PPCISD::MTFSB1";
  case PPCISD::FADDRTZ:       return "PPCISD::FADDRTZ";
  case PPCISD::MTFSF:         return "PPCISD::MTFSF";
  }
}


MVT::ValueType
PPCTargetLowering::getSetCCResultType(const SDOperand &) const {
  return MVT::i32;
}


//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)      // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {    // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
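
// For example, vmrghb interleaves the high (low-numbered, since PPC is
// big-endian) halves of its two v16i8 inputs, yielding
// <A0,B0,A1,B1,...,A7,B7>.  As a byte shuffle mask that is
// <0,16,1,17,...,7,23>, which isVMerge matches with UnitSize=1, LHSStart=0,
// RHSStart=16; vmrglb uses LHSStart=8, RHSStart=24 for the low halves.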
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that the indices are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  if (PPC::isSplatShuffleMask(N, N->getNumOperands()))
    // Check the splatted element, not the BUILD_VECTOR node itself (the
    // original dyn_cast of N could never succeed here).
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();
  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}
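
// For example, splatting word 1 of a v4i32 vector shows up here as the byte
// mask <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>: isSplatShuffleMask(N, 4)
// accepts it, and getVSPLTImmediate returns 4/4 == 1 as the vspltw immediate.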
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;  // Number of BV entries per splat value.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();


      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)                                 // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.
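
  // OpVal is now the single distinct non-undef element.  What follows checks
  // whether its bit pattern is just a ByteSize-byte value replicated, and
  // whether that value fits in the signed 5-bit vspltis* immediate field.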
  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
}
static bool isIntS16Immediate(SDOperand Op, short &Imm) {
  return isIntS16Immediate(Op.Val, Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base,
                                            SDOperand &Index,
                                            SelectionDAG &DAG) {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i; let the [r+imm] forms fold it if they can.
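
    // Otherwise the or may still be selectable as an add; for example, in
    // (or (shl X, 4), (and Y, 15)) the operands have no set bits in common,
    // so the or behaves exactly like an add.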
    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          APInt::getAllOnesValue(N.getOperand(0)
                                                 .getValueSizeInBits()),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            APInt::getAllOnesValue(N.getOperand(1)
                                                   .getValueSizeInBits()),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp,
                                            SDOperand &Base, SelectionDAG &DAG){
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }
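
    // For example, a load from absolute address 0x12348000 is materialized
    // below as lis(0x1235) -- 0x12348000 plus 0x8000, taken from the high
    // halfword -- with displacement -0x8000; the hardware's add of base and
    // displacement recreates 0x12348000.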
    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getValue() == (int)CN->getValue()) {
      int Addr = (int)CN->getValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base,
                                                SDOperand &Index,
                                                SelectionDAG &DAG) {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPC::R0, N.getValueType());
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp,
                                                 SDOperand &Base,
                                                 SelectionDAG &DAG) {
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.  Verify low two bits are clear.
    if ((CN->getValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field,
      // codegen this as "d, 0".
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getValue() == (int)CN->getValue()) {
        int Addr = (int)CN->getValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);

        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}


/// getPreIndexedAddressParts - Returns true by value, and sets the base
/// pointer, the offset, and the addressing mode by reference, if this node's
/// address can be legally represented as a pre-indexed load/store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                                  SDOperand &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDOperand Ptr;
  MVT::ValueType VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();

  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (MVT::isVector(VT))
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }
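
  // For example, with pre-increment enabled, a load from r3+8 that also
  // updates r3 can become lwzu r4, 8(r3): the effective address r3+8 is
  // both loaded from and written back into r3 by the one instruction.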
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op,
                                               SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->getConstVal();
  SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}

SDOperand PPCTargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}

SDOperand PPCTargetLowering::LowerGlobalTLSAddress(SDOperand Op,
                                                   SelectionDAG &DAG) {
  assert(0 && "TLS not implemented for PPC.");
  return SDOperand();  // Not reached
}

SDOperand PPCTargetLowering::LowerGlobalAddress(SDOperand Op,
                                                SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  // If it's a debug information descriptor, don't mess with it.
  if (DAG.isVerifiedDebugInfoDesc(Op))
    return GA;
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);

  if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
}

SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
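  // For example, (setne a, b) becomes (setne (xor a, b), 0), and the xor
  // result can then feed the ctlz/srl zero test above or other combines.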
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}

SDOperand PPCTargetLowering::LowerVAARG(SDOperand Op, SelectionDAG &DAG,
                                        int VarArgsFrameIndex,
                                        int VarArgsStackOffset,
                                        unsigned VarArgsNumGPR,
                                        unsigned VarArgsNumFPR,
                                        const PPCSubtarget &Subtarget) {

  assert(0 && "VAARG in ELF32 ABI not implemented yet!");
  return SDOperand();  // Not reached
}

SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                                          int VarArgsFrameIndex,
                                          int VarArgsStackOffset,
                                          unsigned VarArgsNumGPR,
                                          unsigned VarArgsNumFPR,
                                          const PPCSubtarget &Subtarget) {

  if (Subtarget.isMachoABI()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
  }

  // For the ELF 32 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];


  SDOperand ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8);
  SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);


  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  SDOperand StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);

  uint64_t FrameOffset = MVT::getSizeInBits(PtrVT)/8;
  SDOperand ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);

  uint64_t StackOffset = MVT::getSizeInBits(PtrVT)/8 - 1;
  SDOperand ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);

  uint64_t FPROffset = 1;
  SDOperand ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDOperand firstStore = DAG.getStore(Op.getOperand(0), ArgGPR,
                                      Op.getOperand(1), SV, 0);
  uint64_t nextOffset = FPROffset;
  SDOperand nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1),
                                  ConstFPROffset);

  // Store second byte : number of float regs
  SDOperand secondStore =
    DAG.getStore(firstStore, ArgFPR, nextPtr, SV, nextOffset);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDOperand thirdStore =
    DAG.getStore(secondStore, StackOffsetFI, nextPtr, SV, nextOffset);
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, FR, nextPtr, SV, nextOffset);

}

#include "PPCGenCallingConv.inc"

/// GetFPR - Get the set of FP registers that should be allocated for
/// arguments, depending on which subtarget is selected.
static const unsigned *GetFPR(const PPCSubtarget &Subtarget) {
  if (Subtarget.isMachoABI()) {
    static const unsigned FPR[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
    };
    return FPR;
  }


  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };
  return FPR;
}

SDOperand
PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
                                         SelectionDAG &DAG,
                                         int &VarArgsFrameIndex,
                                         int &VarArgsStackOffset,
                                         unsigned &VarArgsNumGPR,
                                         unsigned &VarArgsNumFPR,
                                         const PPCSubtarget &Subtarget) {
  // TODO: add description of PPC stack frame format, or at least some docs.
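  // (Arguments live above the linkage area that the caller allocates; on
  // Darwin that area is 24 bytes for 32-bit code and 48 bytes for 64-bit
  // code, which is what PPCFrameInfo::getLinkageSize reports below.)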
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SmallVector<SDOperand, 8> ArgValues;
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  bool isMachoABI = Subtarget.isMachoABI();
  bool isELF32_ABI = Subtarget.isELF32_ABI();
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);

  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };

  static const unsigned *FPR = GetFPR(Subtarget);

  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = isMachoABI ? 13 : 8;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples:), but we have to walk the arglist to figure
  // that out...for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area.  Computing VecArgOffset is the
  // entire point of the following loop.
  // Altivec is not mentioned in the ppc32 Elf Supplement, so I'm not trying
  // to handle Elf here.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e;
         ++ArgNo) {
      MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
      unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
      ISD::ArgFlagsTy Flags =
        cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is rounded up to a multiple of
        // registers.
        ObjSize = Flags.getByValSize();
        unsigned ArgSize =
          ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += isPPC64 ? 8 : 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is.  Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;
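  // (The 12*16 bytes skipped here correspond to the twelve vector registers
  // V2..V13 in which the first vector arguments are passed; only vectors
  // beyond those spill into the vector parameter area.)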
On 1408 // entry to a function on PPC, the arguments start after the linkage area, 1409 // although the first ones are often in registers. 1410 // 1411 // In the ELF 32 ABI, GPRs and the stack are double-word aligned: an argument 1412 // represented with two words (long long or double) must be copied to an 1413 // even GPR_idx value or to an even ArgOffset value. 1414 1415 SmallVector<SDOperand, 8> MemOps; 1416 1417 for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { 1418 SDOperand ArgVal; 1419 bool needsLoad = false; 1420 MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType(); 1421 unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8; 1422 unsigned ArgSize = ObjSize; 1423 ISD::ArgFlagsTy Flags = 1424 cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags(); 1425 // See if the next argument requires stack alignment in ELF 1426 bool Align = Flags.isDivided(); 1427 1428 unsigned CurArgOffset = ArgOffset; 1429 1430 // FIXME alignment for ELF may not be right 1431 // FIXME the codegen can be much improved in some cases. 1432 // We do not have to keep everything in memory. 1433 if (Flags.isByVal()) { 1434 // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple of the register size. 1435 ObjSize = Flags.getByValSize(); 1436 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1437 // Double-word align in ELF 1438 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2); 1439 // Objects of size 1 and 2 are right-justified; everything else is 1440 // left-justified. This means the memory address is adjusted forwards. 1441 if (ObjSize==1 || ObjSize==2) { 1442 CurArgOffset = CurArgOffset + (4 - ObjSize); 1443 } 1444 // The value of the object is its address. 1445 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset); 1446 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); 1447 ArgValues.push_back(FIN); 1448 if (ObjSize==1 || ObjSize==2) { 1449 if (GPR_idx != Num_GPR_Regs) { 1450 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1451 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1452 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1453 SDOperand Store = DAG.getTruncStore(Val.getValue(1), Val, FIN, 1454 NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 ); 1455 MemOps.push_back(Store); 1456 ++GPR_idx; 1457 if (isMachoABI) ArgOffset += PtrByteSize; 1458 } else { 1459 ArgOffset += PtrByteSize; 1460 } 1461 continue; 1462 } 1463 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 1464 // Store whatever pieces of the object are in registers 1465 // to memory. ArgVal will be the address of the beginning of 1466 // the object.
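// Note that in the Macho ABI each piece that lands in a register still
// consumes its pointer-sized slot in the parameter save area; ArgOffset is
// advanced below so the in-memory copy stays in step with the register walk.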
1467 if (GPR_idx != Num_GPR_Regs) { 1468 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1469 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1470 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset); 1471 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); 1472 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1473 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1474 MemOps.push_back(Store); 1475 ++GPR_idx; 1476 if (isMachoABI) ArgOffset += PtrByteSize; 1477 } else { 1478 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 1479 break; 1480 } 1481 } 1482 continue; 1483 } 1484 1485 switch (ObjectVT) { 1486 default: assert(0 && "Unhandled argument type!"); 1487 case MVT::i32: 1488 if (!isPPC64) { 1489 // Double word align in ELF 1490 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2); 1491 1492 if (GPR_idx != Num_GPR_Regs) { 1493 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1494 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1495 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32); 1496 ++GPR_idx; 1497 } else { 1498 needsLoad = true; 1499 ArgSize = PtrByteSize; 1500 } 1501 // Stack align in ELF 1502 if (needsLoad && Align && isELF32_ABI) 1503 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1504 // All int arguments reserve stack space in Macho ABI. 1505 if (isMachoABI || needsLoad) ArgOffset += PtrByteSize; 1506 break; 1507 } 1508 // FALLTHROUGH 1509 case MVT::i64: // PPC64 1510 if (GPR_idx != Num_GPR_Regs) { 1511 unsigned VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 1512 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1513 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1514 1515 if (ObjectVT == MVT::i32) { 1516 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 1517 // value to MVT::i64 and then truncate to the correct register size. 1518 if (Flags.isSExt()) 1519 ArgVal = DAG.getNode(ISD::AssertSext, MVT::i64, ArgVal, 1520 DAG.getValueType(ObjectVT)); 1521 else if (Flags.isZExt()) 1522 ArgVal = DAG.getNode(ISD::AssertZext, MVT::i64, ArgVal, 1523 DAG.getValueType(ObjectVT)); 1524 1525 ArgVal = DAG.getNode(ISD::TRUNCATE, MVT::i32, ArgVal); 1526 } 1527 1528 ++GPR_idx; 1529 } else { 1530 needsLoad = true; 1531 } 1532 // All int arguments reserve stack space in Macho ABI. 1533 if (isMachoABI || needsLoad) ArgOffset += 8; 1534 break; 1535 1536 case MVT::f32: 1537 case MVT::f64: 1538 // Every 4 bytes of argument space consumes one of the GPRs available for 1539 // argument passing. 1540 if (GPR_idx != Num_GPR_Regs && isMachoABI) { 1541 ++GPR_idx; 1542 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 1543 ++GPR_idx; 1544 } 1545 if (FPR_idx != Num_FPR_Regs) { 1546 unsigned VReg; 1547 if (ObjectVT == MVT::f32) 1548 VReg = RegInfo.createVirtualRegister(&PPC::F4RCRegClass); 1549 else 1550 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 1551 RegInfo.addLiveIn(FPR[FPR_idx], VReg); 1552 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1553 ++FPR_idx; 1554 } else { 1555 needsLoad = true; 1556 } 1557 1558 // Stack align in ELF 1559 if (needsLoad && Align && isELF32_ABI) 1560 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1561 // All FP arguments reserve stack space in Macho ABI. 1562 if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize; 1563 break; 1564 case MVT::v4f32: 1565 case MVT::v4i32: 1566 case MVT::v8i16: 1567 case MVT::v16i8: 1568 // Note that vector arguments in registers don't reserve stack space, 1569 // except in varargs functions. 
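// In the varargs case below, a vector argument also shadows integer
// registers: ArgOffset is rounded up to a 16-byte boundary and advanced by
// 16, and GPR_idx moves forward by up to four slots to match.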
1570 if (VR_idx != Num_VR_Regs) { 1571 unsigned VReg = RegInfo.createVirtualRegister(&PPC::VRRCRegClass); 1572 RegInfo.addLiveIn(VR[VR_idx], VReg); 1573 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1574 if (isVarArg) { 1575 while ((ArgOffset % 16) != 0) { 1576 ArgOffset += PtrByteSize; 1577 if (GPR_idx != Num_GPR_Regs) 1578 GPR_idx++; 1579 } 1580 ArgOffset += 16; 1581 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); 1582 } 1583 ++VR_idx; 1584 } else { 1585 if (!isVarArg && !isPPC64) { 1586 // Vectors go after all the nonvectors. 1587 CurArgOffset = VecArgOffset; 1588 VecArgOffset += 16; 1589 } else { 1590 // Vectors are aligned. 1591 ArgOffset = ((ArgOffset+15)/16)*16; 1592 CurArgOffset = ArgOffset; 1593 ArgOffset += 16; 1594 } 1595 needsLoad = true; 1596 } 1597 break; 1598 } 1599 1600 // We need to load the argument to a virtual register if we determined above 1601 // that we ran out of physical registers of the appropriate type. 1602 if (needsLoad) { 1603 int FI = MFI->CreateFixedObject(ObjSize, 1604 CurArgOffset + (ArgSize - ObjSize)); 1605 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); 1606 ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); 1607 } 1608 1609 ArgValues.push_back(ArgVal); 1610 } 1611 1612 // If the function takes a variable number of arguments, make a frame index for 1613 // the start of the first vararg value... for expansion of llvm.va_start. 1614 if (isVarArg) { 1615 1616 int depth; 1617 if (isELF32_ABI) { 1618 VarArgsNumGPR = GPR_idx; 1619 VarArgsNumFPR = FPR_idx; 1620 1621 // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame 1622 // pointer. 1623 depth = -(Num_GPR_Regs * MVT::getSizeInBits(PtrVT)/8 + 1624 Num_FPR_Regs * MVT::getSizeInBits(MVT::f64)/8 + 1625 MVT::getSizeInBits(PtrVT)/8); 1626 1627 VarArgsStackOffset = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, 1628 ArgOffset); 1629 1630 } 1631 else 1632 depth = ArgOffset; 1633 1634 VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8, 1635 depth); 1636 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1637 1638 // In the ELF 32 ABI, the fixed integer arguments of a variadic function are 1639 // stored to the VarArgsFrameIndex on the stack. 1640 if (isELF32_ABI) { 1641 for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) { 1642 SDOperand Val = DAG.getRegister(GPR[GPR_idx], PtrVT); 1643 SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); 1644 MemOps.push_back(Store); 1645 // Increment the address by four for the next argument to store 1646 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); 1647 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1648 } 1649 } 1650 1651 // If this function is vararg, store any remaining integer argument regs 1652 // to their spots on the stack so that they may be loaded by dereferencing the 1653 // result of va_next.
1654 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 1655 unsigned VReg; 1656 if (isPPC64) 1657 VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 1658 else 1659 VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1660 1661 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1662 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1663 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1664 MemOps.push_back(Store); 1665 // Increment the address by the pointer size for the next argument to store 1666 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT); 1667 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1668 } 1669 1670 // In the ELF 32 ABI, the double arguments are stored to the VarArgsFrameIndex 1671 // on the stack. 1672 if (isELF32_ABI) { 1673 for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) { 1674 SDOperand Val = DAG.getRegister(FPR[FPR_idx], MVT::f64); 1675 SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); 1676 MemOps.push_back(Store); 1677 // Increment the address by eight for the next argument to store 1678 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8, 1679 PtrVT); 1680 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1681 } 1682 1683 for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) { 1684 unsigned VReg; 1685 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 1686 1687 RegInfo.addLiveIn(FPR[FPR_idx], VReg); 1688 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::f64); 1689 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1690 MemOps.push_back(Store); 1691 // Increment the address by eight for the next argument to store 1692 SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8, 1693 PtrVT); 1694 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1695 } 1696 } 1697 } 1698 1699 if (!MemOps.empty()) 1700 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size()); 1701 1702 ArgValues.push_back(Root); 1703 1704 // Return the new list of results. 1705 std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(), 1706 Op.Val->value_end()); 1707 return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size()); 1708} 1709 1710/// isBLACompatibleAddress - Return the immediate to use if the specified 1711/// 32-bit value is representable in the immediate field of a BxA instruction. 1712static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) { 1713 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 1714 if (!C) return 0; 1715 1716 int Addr = C->getValue(); 1717 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 1718 (Addr << 6 >> 6) != Addr) 1719 return 0; // Top 6 bits have to be sext of immediate. 1720 1721 return DAG.getConstant((int)C->getValue() >> 2, 1722 DAG.getTargetLoweringInfo().getPointerTy()).Val; 1723} 1724 1725/// CreateCopyOfByValArgument - Make a copy of size "Size" of the aggregate at 1726/// the address specified by "Src" to the address "Dst". Alignment information 1727/// is specified by the specific parameter attribute. The copy will be passed 1728/// as a byval function parameter. 1729/// Sometimes what we are copying is the end of a larger object, the part that 1730/// does not fit in registers.
1731static SDOperand 1732CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain, 1733 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1734 unsigned Size) { 1735 SDOperand SizeNode = DAG.getConstant(Size, MVT::i32); 1736 return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(), false, 1737 NULL, 0, NULL, 0); 1738} 1739 1740SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, 1741 const PPCSubtarget &Subtarget, 1742 TargetMachine &TM) { 1743 SDOperand Chain = Op.getOperand(0); 1744 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1745 SDOperand Callee = Op.getOperand(4); 1746 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 1747 1748 bool isMachoABI = Subtarget.isMachoABI(); 1749 bool isELF32_ABI = Subtarget.isELF32_ABI(); 1750 1751 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1752 bool isPPC64 = PtrVT == MVT::i64; 1753 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1754 1755 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in 1756 // SelectExpr to use to put the arguments in the appropriate registers. 1757 std::vector<SDOperand> args_to_use; 1758 1759 // Count how many bytes are to be pushed on the stack, including the linkage 1760 // area, and parameter passing area. We start with 24/48 bytes, which is 1761 // prereserved space for [SP][CR][LR][3 x unused]. 1762 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI); 1763 1764 // Add up all the space actually used. 1765 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 1766 // they all go in registers, but we must reserve stack space for them for 1767 // possible use by the callee. In varargs or 64-bit calls, parameters are 1768 // assigned stack space in order, with padding so Altivec parameters are 1769 // 16-byte aligned. 1770 unsigned nAltivecParamsAtEnd = 0; 1771 for (unsigned i = 0; i != NumOps; ++i) { 1772 SDOperand Arg = Op.getOperand(5+2*i); 1773 MVT::ValueType ArgVT = Arg.getValueType(); 1774 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 1775 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 1776 if (!isVarArg && !isPPC64) { 1777 // Non-varargs Altivec parameters go after all the non-Altivec 1778 // parameters; do those last so we know how much padding we need. 1779 nAltivecParamsAtEnd++; 1780 continue; 1781 } else { 1782 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary. 1783 NumBytes = ((NumBytes+15)/16)*16; 1784 } 1785 } 1786 ISD::ArgFlagsTy Flags = 1787 cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); 1788 unsigned ArgSize = MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8; 1789 if (Flags.isByVal()) 1790 ArgSize = Flags.getByValSize(); 1791 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1792 NumBytes += ArgSize; 1793 } 1794 // Allow for Altivec parameters at the end, if needed. 1795 if (nAltivecParamsAtEnd) { 1796 NumBytes = ((NumBytes+15)/16)*16; 1797 NumBytes += 16*nAltivecParamsAtEnd; 1798 } 1799 1800 // The prolog code of the callee may store up to 8 GPR argument registers to 1801 // the stack, allowing va_start to index over them in memory if it is varargs. 1802 // Because we cannot tell if this is needed on the caller side, we have to 1803 // conservatively assume that it is needed. As such, make sure we have at 1804 // least enough stack space for the caller to store the 8 GPRs.
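// For example, assuming getMinCallFrameSize is the linkage area plus the
// eight pointer-sized register slots, this floor is 24 + 8*4 = 56 bytes on
// 32-bit Macho and 48 + 8*8 = 112 bytes on 64-bit.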
1805 NumBytes = std::max(NumBytes, 1806 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI)); 1807 1808 // Adjust the stack pointer for the new arguments... 1809 // These operations are automatically eliminated by the prolog/epilog pass 1810 Chain = DAG.getCALLSEQ_START(Chain, 1811 DAG.getConstant(NumBytes, PtrVT)); 1812 SDOperand CallSeqStart = Chain; 1813 1814 // Set up a copy of the stack pointer for use in loading and storing any 1815 // arguments that may not fit in the registers available for argument 1816 // passing. 1817 SDOperand StackPtr; 1818 if (isPPC64) 1819 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 1820 else 1821 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 1822 1823 // Figure out which arguments are going to go in registers, and which in 1824 // memory. Also, if this is a vararg function, floating point arguments 1825 // must be stored to the stack, and loaded into integer regs as well, if 1826 // any integer regs are available for argument passing. 1827 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI); 1828 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1829 1830 static const unsigned GPR_32[] = { // 32-bit registers. 1831 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1832 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1833 }; 1834 static const unsigned GPR_64[] = { // 64-bit registers. 1835 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1836 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1837 }; 1838 static const unsigned *FPR = GetFPR(Subtarget); 1839 1840 static const unsigned VR[] = { 1841 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1842 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1843 }; 1844 const unsigned NumGPRs = array_lengthof(GPR_32); 1845 const unsigned NumFPRs = isMachoABI ? 13 : 8; 1846 const unsigned NumVRs = array_lengthof( VR); 1847 1848 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1849 1850 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 1851 SmallVector<SDOperand, 8> MemOpChains; 1852 for (unsigned i = 0; i != NumOps; ++i) { 1853 bool inMem = false; 1854 SDOperand Arg = Op.getOperand(5+2*i); 1855 ISD::ArgFlagsTy Flags = 1856 cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); 1857 // See if the next argument requires stack alignment in ELF 1858 bool Align = Flags.isDivided(); 1859 1860 // PtrOff will be used to store the current argument to the stack if a 1861 // register cannot be found for it. 1862 SDOperand PtrOff; 1863 1864 // Stack align in ELF 32 1865 if (isELF32_ABI && Align) 1866 PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize, 1867 StackPtr.getValueType()); 1868 else 1869 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 1870 1871 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff); 1872 1873 // On PPC64, promote integers to 64-bit values. 1874 if (isPPC64 && Arg.getValueType() == MVT::i32) { 1875 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 1876 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 1877 Arg = DAG.getNode(ExtOp, MVT::i64, Arg); 1878 } 1879 1880 // FIXME: ELF untested, what are the alignment rules? 1881 // FIXME: memcpy is used way more than necessary. Correctness first. 1882 if (Flags.isByVal()) { 1883 unsigned Size = Flags.getByValSize(); 1884 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2); 1885 if (Size==1 || Size==2) { 1886 // Very small objects are passed right-justified. 1887 // Everything else is passed left-justified. 1888 MVT::ValueType VT = (Size==1) ?
MVT::i8 : MVT::i16; 1889 if (GPR_idx != NumGPRs) { 1890 SDOperand Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg, 1891 NULL, 0, VT); 1892 MemOpChains.push_back(Load.getValue(1)); 1893 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 1894 if (isMachoABI) 1895 ArgOffset += PtrByteSize; 1896 } else { 1897 SDOperand Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); 1898 SDOperand AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const); 1899 SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, 1900 CallSeqStart.Val->getOperand(0), 1901 Flags, DAG, Size); 1902 // This must go outside the CALLSEQ_START..END. 1903 SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 1904 CallSeqStart.Val->getOperand(1)); 1905 DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val); 1906 Chain = CallSeqStart = NewCallSeqStart; 1907 ArgOffset += PtrByteSize; 1908 } 1909 continue; 1910 } 1911 // Copy entire object into memory. There are cases where gcc-generated 1912 // code assumes it is there, even if it could be put entirely into 1913 // registers. (This is not what the doc says.) 1914 SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 1915 CallSeqStart.Val->getOperand(0), 1916 Flags, DAG, Size); 1917 // This must go outside the CALLSEQ_START..END. 1918 SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 1919 CallSeqStart.Val->getOperand(1)); 1920 DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val); 1921 Chain = CallSeqStart = NewCallSeqStart; 1922 // And copy the pieces of it that fit into registers. 1923 for (unsigned j=0; j<Size; j+=PtrByteSize) { 1924 SDOperand Const = DAG.getConstant(j, PtrOff.getValueType()); 1925 SDOperand AddArg = DAG.getNode(ISD::ADD, PtrVT, Arg, Const); 1926 if (GPR_idx != NumGPRs) { 1927 SDOperand Load = DAG.getLoad(PtrVT, Chain, AddArg, NULL, 0); 1928 MemOpChains.push_back(Load.getValue(1)); 1929 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 1930 if (isMachoABI) 1931 ArgOffset += PtrByteSize; 1932 } else { 1933 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 1934 break; 1935 } 1936 } 1937 continue; 1938 } 1939 1940 switch (Arg.getValueType()) { 1941 default: assert(0 && "Unexpected ValueType for argument!"); 1942 case MVT::i32: 1943 case MVT::i64: 1944 // Double word align in ELF 1945 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2); 1946 if (GPR_idx != NumGPRs) { 1947 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 1948 } else { 1949 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1950 inMem = true; 1951 } 1952 if (inMem || isMachoABI) { 1953 // Stack align in ELF 1954 if (isELF32_ABI && Align) 1955 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1956 1957 ArgOffset += PtrByteSize; 1958 } 1959 break; 1960 case MVT::f32: 1961 case MVT::f64: 1962 if (FPR_idx != NumFPRs) { 1963 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 1964 1965 if (isVarArg) { 1966 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 1967 MemOpChains.push_back(Store); 1968 1969 // Float varargs are always shadowed in available integer registers 1970 if (GPR_idx != NumGPRs) { 1971 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1972 MemOpChains.push_back(Load.getValue(1)); 1973 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], 1974 Load)); 1975 } 1976 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 1977 SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 1978 PtrOff = DAG.getNode(ISD::ADD, 
PtrVT, PtrOff, ConstFour); 1979 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 1980 MemOpChains.push_back(Load.getValue(1)); 1981 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], 1982 Load)); 1983 } 1984 } else { 1985 // If we have any FPRs remaining, we may also have GPRs remaining. 1986 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 1987 // GPRs. 1988 if (isMachoABI) { 1989 if (GPR_idx != NumGPRs) 1990 ++GPR_idx; 1991 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 1992 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 1993 ++GPR_idx; 1994 } 1995 } 1996 } else { 1997 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 1998 inMem = true; 1999 } 2000 if (inMem || isMachoABI) { 2001 // Stack align in ELF 2002 if (isELF32_ABI && Align) 2003 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 2004 if (isPPC64) 2005 ArgOffset += 8; 2006 else 2007 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 2008 } 2009 break; 2010 case MVT::v4f32: 2011 case MVT::v4i32: 2012 case MVT::v8i16: 2013 case MVT::v16i8: 2014 if (isVarArg) { 2015 // These go aligned on the stack, or in the corresponding R registers 2016 // when within range. The Darwin PPC ABI doc claims they also go in 2017 // V registers; in fact gcc does this only for arguments that are 2018 // prototyped, not for those that match the ... We do it for all 2019 // arguments, seems to work. 2020 while (ArgOffset % 16 !=0) { 2021 ArgOffset += PtrByteSize; 2022 if (GPR_idx != NumGPRs) 2023 GPR_idx++; 2024 } 2025 // We could elide this store in the case where the object fits 2026 // entirely in R registers. Maybe later. 2027 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, 2028 DAG.getConstant(ArgOffset, PtrVT)); 2029 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 2030 MemOpChains.push_back(Store); 2031 if (VR_idx != NumVRs) { 2032 SDOperand Load = DAG.getLoad(MVT::v4f32, Store, PtrOff, NULL, 0); 2033 MemOpChains.push_back(Load.getValue(1)); 2034 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 2035 } 2036 ArgOffset += 16; 2037 for (unsigned i=0; i<16; i+=PtrByteSize) { 2038 if (GPR_idx == NumGPRs) 2039 break; 2040 SDOperand Ix = DAG.getNode(ISD::ADD, PtrVT, PtrOff, 2041 DAG.getConstant(i, PtrVT)); 2042 SDOperand Load = DAG.getLoad(PtrVT, Store, Ix, NULL, 0); 2043 MemOpChains.push_back(Load.getValue(1)); 2044 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 2045 } 2046 break; 2047 } 2048 // Non-varargs Altivec params generally go in registers, but have 2049 // stack space allocated at the end. 2050 if (VR_idx != NumVRs) { 2051 // Doesn't have GPR space allocated. 2052 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 2053 } else if (nAltivecParamsAtEnd==0) { 2054 // We are emitting Altivec params in order. 2055 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, 2056 DAG.getConstant(ArgOffset, PtrVT)); 2057 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 2058 MemOpChains.push_back(Store); 2059 ArgOffset += 16; 2060 } 2061 break; 2062 } 2063 } 2064 // If all Altivec parameters fit in registers, as they usually do, 2065 // they get stack space following the non-Altivec parameters. We 2066 // don't track this here because nobody below needs it. 2067 // If there are more Altivec parameters than fit in registers emit 2068 // the stores here. 2069 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 2070 unsigned j = 0; 2071 // Offset is aligned; skip 1st 12 params which go in V registers. 
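// (The 12 skipped slots correspond to V2..V13 above, 16 bytes each.)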
2072 ArgOffset = ((ArgOffset+15)/16)*16; 2073 ArgOffset += 12*16; 2074 for (unsigned i = 0; i != NumOps; ++i) { 2075 SDOperand Arg = Op.getOperand(5+2*i); 2076 MVT::ValueType ArgType = Arg.getValueType(); 2077 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 2078 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 2079 if (++j > NumVRs) { 2080 SDOperand PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, 2081 DAG.getConstant(ArgOffset, PtrVT)); 2082 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 2083 MemOpChains.push_back(Store); 2084 ArgOffset += 16; 2085 } 2086 } 2087 } 2088 } 2089 2090 if (!MemOpChains.empty()) 2091 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 2092 &MemOpChains[0], MemOpChains.size()); 2093 2094 // Build a sequence of copy-to-reg nodes chained together with token chain 2095 // and flag operands which copy the outgoing args into the appropriate regs. 2096 SDOperand InFlag; 2097 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2098 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 2099 InFlag); 2100 InFlag = Chain.getValue(1); 2101 } 2102 2103 // With the ELF 32 ABI, set CR6 to true if this is a vararg call. 2104 if (isVarArg && isELF32_ABI) { 2105 SDOperand SetCR(DAG.getTargetNode(PPC::CRSET, MVT::i32), 0); 2106 Chain = DAG.getCopyToReg(Chain, PPC::CR1EQ, SetCR, InFlag); 2107 InFlag = Chain.getValue(1); 2108 } 2109 2110 std::vector<MVT::ValueType> NodeTys; 2111 NodeTys.push_back(MVT::Other); // Returns a chain 2112 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 2113 2114 SmallVector<SDOperand, 8> Ops; 2115 unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF; 2116 2117 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 2118 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 2119 // node so that legalize doesn't hack it. 2120 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2121 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType()); 2122 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 2123 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); 2124 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) 2125 // If this is an absolute destination address, use the munged value. 2126 Callee = SDOperand(Dest, 0); 2127 else { 2128 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 2129 // to do the call, we can't use PPCISD::CALL. 2130 SDOperand MTCTROps[] = {Chain, Callee, InFlag}; 2131 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0)); 2132 InFlag = Chain.getValue(1); 2133 2134 // Copy the callee address into R12/X12 on darwin. 2135 if (isMachoABI) { 2136 unsigned Reg = Callee.getValueType() == MVT::i32 ? PPC::R12 : PPC::X12; 2137 Chain = DAG.getCopyToReg(Chain, Reg, Callee, InFlag); 2138 InFlag = Chain.getValue(1); 2139 } 2140 2141 NodeTys.clear(); 2142 NodeTys.push_back(MVT::Other); 2143 NodeTys.push_back(MVT::Flag); 2144 Ops.push_back(Chain); 2145 CallOpc = isMachoABI ? PPCISD::BCTRL_Macho : PPCISD::BCTRL_ELF; 2146 Callee.Val = 0; 2147 } 2148 2149 // If this is a direct call, pass the chain and the callee. 2150 if (Callee.Val) { 2151 Ops.push_back(Chain); 2152 Ops.push_back(Callee); 2153 } 2154 2155 // Add argument registers to the end of the list so that they are known live 2156 // into the call. 
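// Making the registers explicit operands of the call node tells later
// passes that they are live across this point, so they are not reused
// between the copies above and the call itself.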
2157 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2158 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2159 RegsToPass[i].second.getValueType())); 2160 2161 if (InFlag.Val) 2162 Ops.push_back(InFlag); 2163 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size()); 2164 InFlag = Chain.getValue(1); 2165 2166 Chain = DAG.getCALLSEQ_END(Chain, 2167 DAG.getConstant(NumBytes, PtrVT), 2168 DAG.getConstant(0, PtrVT), 2169 InFlag); 2170 if (Op.Val->getValueType(0) != MVT::Other) 2171 InFlag = Chain.getValue(1); 2172 2173 SmallVector<SDOperand, 16> ResultVals; 2174 SmallVector<CCValAssign, 16> RVLocs; 2175 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); 2176 CCState CCInfo(CC, isVarArg, TM, RVLocs); 2177 CCInfo.AnalyzeCallResult(Op.Val, RetCC_PPC); 2178 2179 // Copy all of the result registers out of their specified physreg. 2180 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2181 CCValAssign &VA = RVLocs[i]; 2182 MVT::ValueType VT = VA.getValVT(); 2183 assert(VA.isRegLoc() && "Can only return in registers!"); 2184 Chain = DAG.getCopyFromReg(Chain, VA.getLocReg(), VT, InFlag).getValue(1); 2185 ResultVals.push_back(Chain.getValue(0)); 2186 InFlag = Chain.getValue(2); 2187 } 2188 2189 // If the function returns void, just return the chain. 2190 if (RVLocs.empty()) 2191 return Chain; 2192 2193 // Otherwise, merge everything together with a MERGE_VALUES node. 2194 ResultVals.push_back(Chain); 2195 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), 2196 &ResultVals[0], ResultVals.size()); 2197 return Res.getValue(Op.ResNo); 2198} 2199 2200SDOperand PPCTargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG, 2201 TargetMachine &TM) { 2202 SmallVector<CCValAssign, 16> RVLocs; 2203 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); 2204 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); 2205 CCState CCInfo(CC, isVarArg, TM, RVLocs); 2206 CCInfo.AnalyzeReturn(Op.Val, RetCC_PPC); 2207 2208 // If this is the first return lowered for this function, add the regs to the 2209 // liveout set for the function. 2210 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 2211 for (unsigned i = 0; i != RVLocs.size(); ++i) 2212 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 2213 } 2214 2215 SDOperand Chain = Op.getOperand(0); 2216 SDOperand Flag; 2217 2218 // Copy the result values into the output registers. 2219 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2220 CCValAssign &VA = RVLocs[i]; 2221 assert(VA.isRegLoc() && "Can only return in registers!"); 2222 Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), Flag); 2223 Flag = Chain.getValue(1); 2224 } 2225 2226 if (Flag.Val) 2227 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain, Flag); 2228 else 2229 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain); 2230} 2231 2232SDOperand PPCTargetLowering::LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, 2233 const PPCSubtarget &Subtarget) { 2234 // When we pop the dynamic allocation we need to restore the SP link. 2235 2236 // Get the correct type for pointers. 2237 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2238 2239 // Construct the stack pointer operand. 2240 bool IsPPC64 = Subtarget.isPPC64(); 2241 unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1; 2242 SDOperand StackPtr = DAG.getRegister(SP, PtrVT); 2243 2244 // Get the operands for the STACKRESTORE.
2245 SDOperand Chain = Op.getOperand(0); 2246 SDOperand SaveSP = Op.getOperand(1); 2247 2248 // Load the old link SP. 2249 SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0); 2250 2251 // Restore the stack pointer. 2252 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP); 2253 2254 // Store the old link SP. 2255 return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0); 2256} 2257 2258SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, 2259 SelectionDAG &DAG, 2260 const PPCSubtarget &Subtarget) { 2261 MachineFunction &MF = DAG.getMachineFunction(); 2262 bool IsPPC64 = Subtarget.isPPC64(); 2263 bool isMachoABI = Subtarget.isMachoABI(); 2264 2265 // Get the current frame pointer save index. The users of this index will be 2266 // primarily DYNALLOC instructions. 2267 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2268 int FPSI = FI->getFramePointerSaveIndex(); 2269 2270 // If the frame pointer save index hasn't been defined yet, create it. 2271 if (!FPSI) { 2272 // Find out the fixed offset of the frame pointer save area. 2273 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, isMachoABI); 2274 2275 // Allocate the frame index for the frame pointer save area. 2276 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset); 2277 // Save the result. 2278 FI->setFramePointerSaveIndex(FPSI); 2279 } 2280 2281 // Get the inputs. 2282 SDOperand Chain = Op.getOperand(0); 2283 SDOperand Size = Op.getOperand(1); 2284 2285 // Get the correct type for pointers. 2286 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2287 // Negate the size. 2288 SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT, 2289 DAG.getConstant(0, PtrVT), Size); 2290 // Construct a node for the frame pointer save index. 2291 SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT); 2292 // Build a DYNALLOC node. 2293 SDOperand Ops[3] = { Chain, NegSize, FPSIdx }; 2294 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 2295 return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3); 2296} 2297 2298 2299/// LowerSELECT_CC - Lower floating point select_cc's into the fsel instruction 2300/// when possible. 2301SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { 2302 // Not FP? Not an fsel. 2303 if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) || 2304 !MVT::isFloatingPoint(Op.getOperand(2).getValueType())) 2305 return SDOperand(); 2306 2307 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2308 2309 // Cannot handle SETEQ/SETNE. 2310 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand(); 2311 2312 MVT::ValueType ResVT = Op.getValueType(); 2313 MVT::ValueType CmpVT = Op.getOperand(0).getValueType(); 2314 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2315 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3); 2316 2317 // If the RHS of the comparison is a 0.0, we don't need to do the 2318 // subtraction at all. 2319 if (isFloatingPointZero(RHS)) 2320 switch (CC) { 2321 default: break; // SETUO etc aren't handled by fsel.
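// As used here, PPCISD::FSEL(C, A, B) selects A when C >= 0.0 and B
// otherwise, so each case below rewrites its predicate as a '>= 0.0' test
// on LHS or -LHS, swapping TV/FV where the sense is inverted.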
2322 case ISD::SETULT: 2323 case ISD::SETOLT: 2324 case ISD::SETLT: 2325 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 2326 case ISD::SETUGE: 2327 case ISD::SETOGE: 2328 case ISD::SETGE: 2329 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 2330 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 2331 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV); 2332 case ISD::SETUGT: 2333 case ISD::SETOGT: 2334 case ISD::SETGT: 2335 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt 2336 case ISD::SETULE: 2337 case ISD::SETOLE: 2338 case ISD::SETLE: 2339 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 2340 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 2341 return DAG.getNode(PPCISD::FSEL, ResVT, 2342 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV); 2343 } 2344 2345 SDOperand Cmp; 2346 switch (CC) { 2347 default: break; // SETUO etc aren't handled by fsel. 2348 case ISD::SETULT: 2349 case ISD::SETOLT: 2350 case ISD::SETLT: 2351 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 2352 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2353 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2354 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 2355 case ISD::SETUGE: 2356 case ISD::SETOGE: 2357 case ISD::SETGE: 2358 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 2359 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2360 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2361 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 2362 case ISD::SETUGT: 2363 case ISD::SETOGT: 2364 case ISD::SETGT: 2365 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 2366 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2367 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2368 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 2369 case ISD::SETULE: 2370 case ISD::SETOLE: 2371 case ISD::SETLE: 2372 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 2373 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2374 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2375 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 2376 } 2377 return SDOperand(); 2378} 2379 2380// FIXME: Split this code up when LegalizeDAGTypes lands. 2381SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { 2382 assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType())); 2383 SDOperand Src = Op.getOperand(0); 2384 if (Src.getValueType() == MVT::f32) 2385 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); 2386 2387 SDOperand Tmp; 2388 switch (Op.getValueType()) { 2389 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); 2390 case MVT::i32: 2391 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src); 2392 break; 2393 case MVT::i64: 2394 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src); 2395 break; 2396 } 2397 2398 // Convert the FP value to an int value through memory. 2399 SDOperand FIPtr = DAG.CreateStackTemporary(MVT::f64); 2400 2401 // Emit a store to the stack slot. 2402 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0); 2403 2404 // Result is a load from the stack slot. If loading 4 bytes, make sure to 2405 // add in a bias.
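// The FCTIWZ result occupies the low 32 bits of the 8-byte f64 slot, and
// PPC is big-endian, so the i32 value lives at byte offset 4; that is the
// bias added below.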
2406 if (Op.getValueType() == MVT::i32) 2407 FIPtr = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr, 2408 DAG.getConstant(4, FIPtr.getValueType())); 2409 return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0); 2410} 2411 2412SDOperand PPCTargetLowering::LowerFP_ROUND_INREG(SDOperand Op, 2413 SelectionDAG &DAG) { 2414 assert(Op.getValueType() == MVT::ppcf128); 2415 SDNode *Node = Op.Val; 2416 assert(Node->getOperand(0).getValueType() == MVT::ppcf128); 2417 assert(Node->getOperand(0).Val->getOpcode() == ISD::BUILD_PAIR); 2418 SDOperand Lo = Node->getOperand(0).Val->getOperand(0); 2419 SDOperand Hi = Node->getOperand(0).Val->getOperand(1); 2420 2421 // This sequence changes FPSCR to do round-to-zero, adds the two halves 2422 // of the long double, and puts FPSCR back the way it was. We do not 2423 // actually model FPSCR. 2424 std::vector<MVT::ValueType> NodeTys; 2425 SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg; 2426 2427 NodeTys.push_back(MVT::f64); // Return register 2428 NodeTys.push_back(MVT::Flag); // Returns a flag for later insns 2429 Result = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0); 2430 MFFSreg = Result.getValue(0); 2431 InFlag = Result.getValue(1); 2432 2433 NodeTys.clear(); 2434 NodeTys.push_back(MVT::Flag); // Returns a flag 2435 Ops[0] = DAG.getConstant(31, MVT::i32); 2436 Ops[1] = InFlag; 2437 Result = DAG.getNode(PPCISD::MTFSB1, NodeTys, Ops, 2); 2438 InFlag = Result.getValue(0); 2439 2440 NodeTys.clear(); 2441 NodeTys.push_back(MVT::Flag); // Returns a flag 2442 Ops[0] = DAG.getConstant(30, MVT::i32); 2443 Ops[1] = InFlag; 2444 Result = DAG.getNode(PPCISD::MTFSB0, NodeTys, Ops, 2); 2445 InFlag = Result.getValue(0); 2446 2447 NodeTys.clear(); 2448 NodeTys.push_back(MVT::f64); // result of add 2449 NodeTys.push_back(MVT::Flag); // Returns a flag 2450 Ops[0] = Lo; 2451 Ops[1] = Hi; 2452 Ops[2] = InFlag; 2453 Result = DAG.getNode(PPCISD::FADDRTZ, NodeTys, Ops, 3); 2454 FPreg = Result.getValue(0); 2455 InFlag = Result.getValue(1); 2456 2457 NodeTys.clear(); 2458 NodeTys.push_back(MVT::f64); 2459 Ops[0] = DAG.getConstant(1, MVT::i32); 2460 Ops[1] = MFFSreg; 2461 Ops[2] = FPreg; 2462 Ops[3] = InFlag; 2463 Result = DAG.getNode(PPCISD::MTFSF, NodeTys, Ops, 4); 2464 FPreg = Result.getValue(0); 2465 2466 // We know the low half is about to be thrown away, so just use something 2467 // convenient. 2468 return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg); 2469} 2470 2471SDOperand PPCTargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { 2472 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 2473 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 2474 return SDOperand(); 2475 2476 if (Op.getOperand(0).getValueType() == MVT::i64) { 2477 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); 2478 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); 2479 if (Op.getValueType() == MVT::f32) 2480 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0)); 2481 return FP; 2482 } 2483 2484 assert(Op.getOperand(0).getValueType() == MVT::i32 && 2485 "Unhandled SINT_TO_FP type in custom expander!"); 2486 // Since we only generate this in 64-bit mode, we can take advantage of 2487 // 64-bit registers. In particular, sign extend the input value into the 2488 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 2489 // then lfd it and fcfid it. 
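// Roughly: extsw; std; lfd; fcfid (plus a final frsp when the result type
// is f32).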
2490 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 2491 int FrameIdx = FrameInfo->CreateStackObject(8, 8); 2492 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2493 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 2494 2495 SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, 2496 Op.getOperand(0)); 2497 2498 // STD the extended value into the stack slot. 2499 MachineMemOperand MO(PseudoSourceValue::getFixedStack(), 2500 MachineMemOperand::MOStore, FrameIdx, 8, 8); 2501 SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other, 2502 DAG.getEntryNode(), Ext64, FIdx, 2503 DAG.getMemOperand(MO)); 2504 // Load the value as a double. 2505 SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0); 2506 2507 // FCFID it and return it. 2508 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); 2509 if (Op.getValueType() == MVT::f32) 2510 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0)); 2511 return FP; 2512} 2513 2514SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { 2515 /* 2516 The rounding mode is in bits 30:31 of FPSCR, and has the following 2517 settings: 2518 00 Round to nearest 2519 01 Round to 0 2520 10 Round to +inf 2521 11 Round to -inf 2522 2523 FLT_ROUNDS, on the other hand, expects the following: 2524 -1 Undefined 2525 0 Round to 0 2526 1 Round to nearest 2527 2 Round to +inf 2528 3 Round to -inf 2529 2530 To perform the conversion, we do: 2531 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 2532 */ 2533 2534 MachineFunction &MF = DAG.getMachineFunction(); 2535 MVT::ValueType VT = Op.getValueType(); 2536 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2537 std::vector<MVT::ValueType> NodeTys; 2538 SDOperand MFFSreg, InFlag; 2539 2540 // Save FP Control Word to register 2541 NodeTys.push_back(MVT::f64); // return register 2542 NodeTys.push_back(MVT::Flag); // unused in this context 2543 SDOperand Chain = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0); 2544 2545 // Save FP register to stack slot 2546 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 2547 SDOperand StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 2548 SDOperand Store = DAG.getStore(DAG.getEntryNode(), Chain, 2549 StackSlot, NULL, 0); 2550 2551 // Load FP Control Word from low 32 bits of stack slot. 2552 SDOperand Four = DAG.getConstant(4, PtrVT); 2553 SDOperand Addr = DAG.getNode(ISD::ADD, PtrVT, StackSlot, Four); 2554 SDOperand CWD = DAG.getLoad(MVT::i32, Store, Addr, NULL, 0); 2555 2556 // Transform as necessary 2557 SDOperand CWD1 = 2558 DAG.getNode(ISD::AND, MVT::i32, 2559 CWD, DAG.getConstant(3, MVT::i32)); 2560 SDOperand CWD2 = 2561 DAG.getNode(ISD::SRL, MVT::i32, 2562 DAG.getNode(ISD::AND, MVT::i32, 2563 DAG.getNode(ISD::XOR, MVT::i32, 2564 CWD, DAG.getConstant(3, MVT::i32)), 2565 DAG.getConstant(3, MVT::i32)), 2566 DAG.getConstant(1, MVT::i8)); 2567 2568 SDOperand RetVal = 2569 DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2); 2570 2571 return DAG.getNode((MVT::getSizeInBits(VT) < 16 ? 2572 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 2573} 2574 2575SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { 2576 MVT::ValueType VT = Op.getValueType(); 2577 unsigned BitWidth = MVT::getSizeInBits(VT); 2578 assert(Op.getNumOperands() == 3 && 2579 VT == Op.getOperand(1).getValueType() && 2580 "Unexpected SHL!"); 2581 2582 // Expand into a bunch of logical ops. Note that these ops 2583 // depend on the PPC behavior for oversized shift amounts.
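// The expansion computes
//   OutHi = (Hi << Amt) | (Lo >> (BitWidth-Amt)) | (Lo << (Amt-BitWidth))
//   OutLo = Lo << Amt
// where a PPC shift by an amount in [BitWidth, 2*BitWidth) yields zero, so
// at most one of the two Lo terms contributes to OutHi.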
2584 SDOperand Lo = Op.getOperand(0); 2585 SDOperand Hi = Op.getOperand(1); 2586 SDOperand Amt = Op.getOperand(2); 2587 MVT::ValueType AmtVT = Amt.getValueType(); 2588 2589 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 2590 DAG.getConstant(BitWidth, AmtVT), Amt); 2591 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, VT, Hi, Amt); 2592 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, VT, Lo, Tmp1); 2593 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 2594 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 2595 DAG.getConstant(-BitWidth, AmtVT)); 2596 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, VT, Lo, Tmp5); 2597 SDOperand OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); 2598 SDOperand OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt); 2599 SDOperand OutOps[] = { OutLo, OutHi }; 2600 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT), 2601 OutOps, 2); 2602} 2603 2604SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { 2605 MVT::ValueType VT = Op.getValueType(); 2606 unsigned BitWidth = MVT::getSizeInBits(VT); 2607 assert(Op.getNumOperands() == 3 && 2608 VT == Op.getOperand(1).getValueType() && 2609 "Unexpected SRL!"); 2610 2611 // Expand into a bunch of logical ops. Note that these ops 2612 // depend on the PPC behavior for oversized shift amounts. 2613 SDOperand Lo = Op.getOperand(0); 2614 SDOperand Hi = Op.getOperand(1); 2615 SDOperand Amt = Op.getOperand(2); 2616 MVT::ValueType AmtVT = Amt.getValueType(); 2617 2618 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 2619 DAG.getConstant(BitWidth, AmtVT), Amt); 2620 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); 2621 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); 2622 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 2623 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 2624 DAG.getConstant(-BitWidth, AmtVT)); 2625 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, VT, Hi, Tmp5); 2626 SDOperand OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); 2627 SDOperand OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt); 2628 SDOperand OutOps[] = { OutLo, OutHi }; 2629 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT), 2630 OutOps, 2); 2631} 2632 2633SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { 2634 MVT::ValueType VT = Op.getValueType(); 2635 unsigned BitWidth = MVT::getSizeInBits(VT); 2636 assert(Op.getNumOperands() == 3 && 2637 VT == Op.getOperand(1).getValueType() && 2638 "Unexpected SRA!"); 2639 2640 // Expand into a bunch of logical ops, followed by a select_cc. 2641 SDOperand Lo = Op.getOperand(0); 2642 SDOperand Hi = Op.getOperand(1); 2643 SDOperand Amt = Op.getOperand(2); 2644 MVT::ValueType AmtVT = Amt.getValueType(); 2645 2646 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 2647 DAG.getConstant(BitWidth, AmtVT), Amt); 2648 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); 2649 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); 2650 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 2651 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 2652 DAG.getConstant(-BitWidth, AmtVT)); 2653 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, VT, Hi, Tmp5); 2654 SDOperand OutHi = DAG.getNode(PPCISD::SRA, VT, Hi, Amt); 2655 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT), 2656 Tmp4, Tmp6, ISD::SETLE); 2657 SDOperand OutOps[] = { OutLo, OutHi }; 2658 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT), 2659 OutOps, 2); 2660} 2661 2662//===----------------------------------------------------------------------===// 2663// Vector related lowering. 
2664// 2665 2666// If this is a vector of constants or undefs, get the bits. A bit in 2667// UndefBits is set if the corresponding element of the vector is an 2668// ISD::UNDEF value. For undefs, the corresponding VectorBits values are 2669// zero. Return true if this is not an array of constants, false if it is. 2670// 2671static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], 2672 uint64_t UndefBits[2]) { 2673 // Start with zero'd results. 2674 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; 2675 2676 unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType()); 2677 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 2678 SDOperand OpVal = BV->getOperand(i); 2679 2680 unsigned PartNo = i >= e/2; // In the upper 64 bits? 2681 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t. 2682 2683 uint64_t EltBits = 0; 2684 if (OpVal.getOpcode() == ISD::UNDEF) { 2685 uint64_t EltUndefBits = ~0U >> (32-EltBitSize); 2686 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); 2687 continue; 2688 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 2689 EltBits = CN->getValue() & (~0U >> (32-EltBitSize)); 2690 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 2691 assert(CN->getValueType(0) == MVT::f32 && 2692 "Only one legal FP vector type!"); 2693 EltBits = FloatToBits(CN->getValueAPF().convertToFloat()); 2694 } else { 2695 // Nonconstant element. 2696 return true; 2697 } 2698 2699 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); 2700 } 2701 2702 //printf("%llx %llx %llx %llx\n", 2703 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); 2704 return false; 2705} 2706 2707// If this is a splat (repetition) of a value across the whole vector, return 2708// the smallest size that splats it. For example, "0x01010101010101..." is a 2709// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 2710// SplatSize = 1 byte. 2711static bool isConstantSplat(const uint64_t Bits128[2], 2712 const uint64_t Undef128[2], 2713 unsigned &SplatBits, unsigned &SplatUndef, 2714 unsigned &SplatSize) { 2715 2716 // Don't let undefs prevent splats from matching. See if the top 64-bits are 2717 // the same as the lower 64-bits, ignoring undefs. 2718 if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0])) 2719 return false; // Can't be a splat if two pieces don't match. 2720 2721 uint64_t Bits64 = Bits128[0] | Bits128[1]; 2722 uint64_t Undef64 = Undef128[0] & Undef128[1]; 2723 2724 // Check that the top 32-bits are the same as the lower 32-bits, ignoring 2725 // undefs. 2726 if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64)) 2727 return false; // Can't be a splat if two pieces don't match. 2728 2729 uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32); 2730 uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32); 2731 2732 // If the top 16-bits are different than the lower 16-bits, ignoring 2733 // undefs, we have an i32 splat. 2734 if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) { 2735 SplatBits = Bits32; 2736 SplatUndef = Undef32; 2737 SplatSize = 4; 2738 return true; 2739 } 2740 2741 uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16); 2742 uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16); 2743 2744 // If the top 8-bits are different than the lower 8-bits, ignoring 2745 // undefs, we have an i16 splat.
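// e.g. a vector of 0x0403 halfwords reaches this point with Bits16 =
// 0x0403; the high and low bytes differ, so it is reported as an i16 splat
// of 0x0403 rather than a smaller one.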
2746 if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) { 2747 SplatBits = Bits16; 2748 SplatUndef = Undef16; 2749 SplatSize = 2; 2750 return true; 2751 } 2752 2753 // Otherwise, we have an 8-bit splat. 2754 SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8); 2755 SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8); 2756 SplatSize = 1; 2757 return true; 2758} 2759 2760/// BuildSplatI - Build a canonical splati of Val with an element size of 2761/// SplatSize. Cast the result to VT. 2762static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT, 2763 SelectionDAG &DAG) { 2764 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 2765 2766 static const MVT::ValueType VTys[] = { // canonical VT to use for each size. 2767 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 2768 }; 2769 2770 MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 2771 2772 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 2773 if (Val == -1) 2774 SplatSize = 1; 2775 2776 MVT::ValueType CanonicalVT = VTys[SplatSize-1]; 2777 2778 // Build a canonical splat for this value. 2779 SDOperand Elt = DAG.getConstant(Val, MVT::getVectorElementType(CanonicalVT)); 2780 SmallVector<SDOperand, 8> Ops; 2781 Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt); 2782 SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, 2783 &Ops[0], Ops.size()); 2784 return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); 2785} 2786 2787/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 2788/// specified intrinsic ID. 2789static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, 2790 SelectionDAG &DAG, 2791 MVT::ValueType DestVT = MVT::Other) { 2792 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 2793 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 2794 DAG.getConstant(IID, MVT::i32), LHS, RHS); 2795} 2796 2797/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 2798/// specified intrinsic ID. 2799static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, 2800 SDOperand Op2, SelectionDAG &DAG, 2801 MVT::ValueType DestVT = MVT::Other) { 2802 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 2803 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 2804 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 2805} 2806 2807 2808/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 2809/// amount. The result has the specified value type. 2810static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, 2811 MVT::ValueType VT, SelectionDAG &DAG) { 2812 // Force LHS/RHS to be the right type. 2813 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); 2814 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); 2815 2816 SDOperand Ops[16]; 2817 for (unsigned i = 0; i != 16; ++i) 2818 Ops[i] = DAG.getConstant(i+Amt, MVT::i32); 2819 SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, 2820 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16)); 2821 return DAG.getNode(ISD::BIT_CONVERT, VT, T); 2822} 2823 2824// If this is a case we can't handle, return null and let the default 2825// expansion code take care of it. If we CAN select this case, and if it 2826// selects to a single instruction, return Op. Otherwise, if we can codegen 2827// this case more efficiently than a constant pool load, lower it to the 2828// sequence of ops that should be used. 
2829SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, 2830 SelectionDAG &DAG) { 2831 // If this is a vector of constants or undefs, get the bits. A bit in 2832 // UndefBits is set if the corresponding element of the vector is an 2833 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are 2834 // zero. 2835 uint64_t VectorBits[2]; 2836 uint64_t UndefBits[2]; 2837 if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)) 2838 return SDOperand(); // Not a constant vector. 2839 2840 // If this is a splat (repetition) of a value across the whole vector, return 2841 // the smallest size that splats it. For example, "0x01010101010101..." is a 2842 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 2843 // SplatSize = 1 byte. 2844 unsigned SplatBits, SplatUndef, SplatSize; 2845 if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){ 2846 bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0; 2847 2848 // First, handle single instruction cases. 2849 2850 // All zeros? 2851 if (SplatBits == 0) { 2852 // Canonicalize all zero vectors to be v4i32. 2853 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 2854 SDOperand Z = DAG.getConstant(0, MVT::i32); 2855 Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z); 2856 Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z); 2857 } 2858 return Op; 2859 } 2860 2861 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 2862 int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize); 2863 if (SextVal >= -16 && SextVal <= 15) 2864 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG); 2865 2866 2867 // Two instruction sequences. 2868 2869 // If this value is in the range [-32,30] and is even, use: 2870 // tmp = VSPLTI[bhw], result = add tmp, tmp 2871 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { 2872 Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG); 2873 return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op); 2874 } 2875 2876 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 2877 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 2878 // for fneg/fabs. 2879 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 2880 // Make a splat of -1 with vspltisw -1: 2881 SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG); 2882 2883 // Make the VSLW intrinsic, computing 0x8000_0000. 2884 SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 2885 OnesV, DAG); 2886 2887 // xor by OnesV to invert it. 2888 Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV); 2889 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 2890 } 2891 2892 // Check to see if this is a wide variety of vsplti*, binop self cases. 2893 unsigned SplatBitSize = SplatSize*8; 2894 static const signed char SplatCsts[] = { 2895 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 2896 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 2897 }; 2898 2899 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 2900 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 2901 // cases which are ambiguous (e.g. formation of 0x8000_0000). 2902 int i = SplatCsts[idx]; 2903 2904 // Figure out what shift amount will be used by altivec if shifted by i in 2905 // this splat size. 2906 unsigned TypeShiftAmt = i & (SplatBitSize-1); 2907 2908 // vsplti + shl self.
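// The 'self' cases below feed the splat to the shift intrinsic as both
// operands, so every element is shifted by the low bits of its own value;
// that is why the effective amount is TypeShiftAmt = i & (SplatBitSize-1).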
      if (SextVal == (i << (int)TypeShiftAmt)) {
        SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
          Intrinsic::ppc_altivec_vslw
        };
        Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }

      // vsplti + srl self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
          Intrinsic::ppc_altivec_vsrw
        };
        Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }

      // vsplti + sra self.
      if (SextVal == (i >> (int)TypeShiftAmt)) {
        SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
          Intrinsic::ppc_altivec_vsraw
        };
        Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }

      // vsplti + rol self.
      if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                           ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
        SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
          Intrinsic::ppc_altivec_vrlw
        };
        Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }

      // t = vsplti c, result = vsldoi t, t, 1
      if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 2
      if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 3
      if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
      }
    }

    // Three instruction sequences.

    // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
    if (SextVal >= 0 && SextVal <= 31) {
      SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
      LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
    }
    // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
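    // e.g. -17 is materialized as (vsplti -1) + (vsplti -16).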
    if (SextVal >= -31 && SextVal <= 0) {
      SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
      LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
    }
  }

  return SDOperand();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS,
                                        SDOperand RHS, SelectionDAG &DAG) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDOperand OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);

  unsigned ShufIdxs[16];
  switch (OpNum) {
  default: assert(0 && "Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
  }
  SDOperand Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i32);

  return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
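/// vperm consumes two source vectors plus a third vector of byte indices, so
/// the fallback costs an extra constant-pool load to materialize that mask.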
SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op,
                                                 SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.getOpcode() == ISD::UNDEF) {
    if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
        PPC::isSplatShuffleMask(PermMask.Val, 2) ||
        PPC::isSplatShuffleMask(PermMask.Val, 4) ||
        PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
        PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
        PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
      PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
      PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
    return Op;

  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
        continue;   // Undef, ignore it.

      unsigned ByteSource =
        cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  if (isFourElementShuffle) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be computed.
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
  MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType());
  unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;

  SmallVector<SDOperand, 16> ResultMask;
  for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
    unsigned SrcElt;
    if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
      SrcElt = 0;
    else
      SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();

    for (unsigned j = 0; j != BytesPerElement; ++j)
      ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
                                           MVT::i8));
  }

  SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
                                    &ResultMask[0], ResultMask.size());
  return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
}

/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
/// altivec comparison.  If it is, return true and fill in Opc/isDot with
/// information about the intrinsic.
static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
                                  bool &isDot) {
  unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default: return false;
    // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;

    // Normal Comparisons.
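    // These produce only the 128-bit compare result; CR6 is not set.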
  case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
  }
  return true;
}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDOperand PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op,
                                                     SelectionDAG &DAG) {
  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
    return SDOperand();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
                                Op.getOperand(1), Op.getOperand(2),
                                DAG.getConstant(CompareOpc, MVT::i32));
    return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDOperand Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, MVT::i32)
  };
  std::vector<MVT::ValueType> VTs;
  VTs.push_back(Op.getOperand(2).getValueType());
  VTs.push_back(MVT::Flag);
  SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
                                DAG.getRegister(PPC::CR6, MVT::i32),
                                CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
                      DAG.getConstant(8-(3-BitNo), MVT::i32));
  // Isolate the bit.
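  // (CR6 lands in bits 7..4 of the MFCR result, LT at bit 7 down to SO at
  // bit 4, which is why the shift amount above is 8-(3-BitNo).)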
  Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
                      DAG.getConstant(1, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
                        DAG.getConstant(1, MVT::i32));
  return Flags;
}

SDOperand PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op,
                                                   SelectionDAG &DAG) {
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16);
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDOperand Store = DAG.getStore(DAG.getEntryNode(),
                                 Op.getOperand(0), FIdx, NULL, 0);
  // Load it out.
  return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0);
}

SDOperand PPCTargetLowering::LowerMUL(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getValueType() == MVT::v4i32) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG);
    SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.

    SDOperand RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                        LHS, RHS, DAG, MVT::v4i32);

    SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                        LHS, RHSSwap, Zero, DAG, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
    return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                           LHS, RHS, DAG, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                          LHS, RHS, DAG, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);

    // Merge the results together.
    SDOperand Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      Ops[i*2  ] = DAG.getConstant(2*i+1, MVT::i8);
      Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
    }
    return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
                       DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
  } else {
    assert(0 && "Unknown mul to lower!");
    abort();
  }
}

/// LowerOperation - Provide custom lowering hooks for some operations.
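/// Each case below corresponds to an operation the constructor registered
/// with setOperationAction(..., Custom).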
///
SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
                        VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);

  case ISD::VAARG:
    return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
                      VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);

  case ISD::FORMAL_ARGUMENTS:
    return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex,
                                 VarArgsStackOffset, VarArgsNumGPR,
                                 VarArgsNumFPR, PPCSubTarget);

  case ISD::CALL:               return LowerCALL(Op, DAG, PPCSubTarget,
                                                 getTargetMachine());
  case ISD::RET:                return LowerRET(Op, DAG, getTargetMachine());
  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);

  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::FP_ROUND_INREG:     return LowerFP_ROUND_INREG(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  }
  return SDOperand();
}

SDNode *PPCTargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(SDOperand(N, 0), DAG).Val;
  }
}


//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  assert((MI->getOpcode() == PPC::SELECT_CC_I4 ||
          MI->getOpcode() == PPC::SELECT_CC_I8 ||
          MI->getOpcode() == PPC::SELECT_CC_F4 ||
          MI->getOpcode() == PPC::SELECT_CC_F8 ||
          MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  ilist<MachineBasicBlock>::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  unsigned SelectPred = MI->getOperand(4).getImm();
  BuildMI(BB, TII->get(PPC::BCC))
    .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
      e = BB->succ_end(); i != e; ++i)
    sinkMBB->addSuccessor(*i);
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while(!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  delete MI;   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case PPCISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 << V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 >>u V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0 ||   //  0 >>s V -> 0.
          C->isAllOnesValue())    // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;

  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
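        // e.g. for f64 X, (double)(long long)X becomes fctidz + fcfid in FPRs
        // instead of a round trip through a stack slot.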
        if (N->getOperand(0).getValueType() == MVT::i64 &&
            N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
          SDOperand Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.Val);
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val,
                              DAG.getIntPtrConstant(0));
            DCI.AddToWorklist(Val.Val);
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32 &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDOperand Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.Val);
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.Val);

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.Val);
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).Val->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16)) {
      SDOperand BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);

      return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
                         N->getOperand(2), N->getOperand(3),
                         DAG.getValueType(N->getOperand(1).getValueType()));
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).Val) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
      SDOperand Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      std::vector<MVT::ValueType> VTs;
      VTs.push_back(MVT::i32);
      VTs.push_back(MVT::Other);
      SDOperand MO = DAG.getMemOperand(LD->getMemOperand());
      SDOperand Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        MO,                // MemOperand
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);

      // If this is an i16 load, insert the truncate.
      SDOperand ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away, we give it a bogus result value but a real
      // chain result.  The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDOperand(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    //
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if ((*UI).getUser()->getOpcode() == PPCISD::VCMPo &&
            (*UI).getUser()->getOperand(1) == N->getOperand(1) &&
            (*UI).getUser()->getOperand(2) == N->getOperand(2) &&
            (*UI).getUser()->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = UI->getUser();
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = UI->getUser();
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFCR instruction, we know this is safe.  Otherwise we
      // give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFCR: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
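      // The VCMPo sets CR6; the COND_BRANCH built below branches on CR6
      // directly, with no intervening MFCR.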
      std::vector<MVT::ValueType> VTs;
      SDOperand Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
      VTs.push_back(LHS.getOperand(2).getValueType());
      VTs.push_back(MVT::Flag);
      SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDOperand();
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}


/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
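/// PPC handles 'b', 'r', 'f', 'v' and 'y' as register-class constraints.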
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, PPC::G8RCRegisterClass);
      return std::make_pair(0U, PPC::GPRCRegisterClass);
    case 'f':
      if (VT == MVT::f32)
        return std::make_pair(0U, PPC::F4RCRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, PPC::F8RCRegisterClass);
      break;
    case 'v':
      return std::make_pair(0U, PPC::VRRCRegisterClass);
    case 'y':   // crrc
      return std::make_pair(0U, PPC::CRRCRegisterClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}


/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDOperand Op, char Letter,
                                                     std::vector<SDOperand>&Ops,
                                                     SelectionDAG &DAG) {
  SDOperand Result(0,0);
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    unsigned Value = CST->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value))
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,const Type *Ty) const{
  // PPC allows a sign-extended 16-bit immediate field.
  return (V > -(1 << 16) && V < (1 << 16)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return false;
}

SDOperand PPCTargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  int RAIdx = FuncInfo->getReturnAddrSaveIndex();
  if (RAIdx == 0) {
    bool isPPC64 = PPCSubTarget.isPPC64();
    int Offset =
      PPCFrameInfo::getReturnSaveOffset(isPPC64, PPCSubTarget.isMachoABI());

    // Set up a frame object for the return address.
    RAIdx = MF.getFrameInfo()->CreateFixedObject(isPPC64 ? 8 : 4, Offset);

    // Remember it for next time.
    FuncInfo->setReturnAddrSaveIndex(RAIdx);

    // Make sure the function really does not optimize away the store of the RA
    // to the stack.
    FuncInfo->setLRStoreRequired();
  }

  // Just load the return address off the stack.
  SDOperand RetAddrFI = DAG.getFrameIndex(RAIdx, getPointerTy());
  return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
}

SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
                  && MFI->getStackSize();

  if (isPPC64)
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::X31 : PPC::X1,
                              MVT::i64);
  else
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::R31 : PPC::R1,
                              MVT::i32);
}