PPCISelLowering.cpp revision 209a4099f96f0dfd340fab4ffaf2c2d8fc15aad6
1//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the PPCISelLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "PPCISelLowering.h" 15#include "PPCMachineFunctionInfo.h" 16#include "PPCPredicates.h" 17#include "PPCTargetMachine.h" 18#include "PPCPerfectShuffle.h" 19#include "llvm/ADT/STLExtras.h" 20#include "llvm/ADT/VectorExtras.h" 21#include "llvm/CodeGen/CallingConvLower.h" 22#include "llvm/CodeGen/MachineFrameInfo.h" 23#include "llvm/CodeGen/MachineFunction.h" 24#include "llvm/CodeGen/MachineInstrBuilder.h" 25#include "llvm/CodeGen/MachineRegisterInfo.h" 26#include "llvm/CodeGen/PseudoSourceValue.h" 27#include "llvm/CodeGen/SelectionDAG.h" 28#include "llvm/CallingConv.h" 29#include "llvm/Constants.h" 30#include "llvm/Function.h" 31#include "llvm/Intrinsics.h" 32#include "llvm/ParameterAttributes.h" 33#include "llvm/Support/MathExtras.h" 34#include "llvm/Target/TargetOptions.h" 35#include "llvm/Support/CommandLine.h" 36using namespace llvm; 37 38static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc", 39cl::desc("enable preincrement load/store generation on PPC (experimental)"), 40 cl::Hidden); 41 42PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) 43 : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) { 44 45 setPow2DivIsCheap(); 46 47 // Use _setjmp/_longjmp instead of setjmp/longjmp. 48 setUseUnderscoreSetJmp(true); 49 setUseUnderscoreLongJmp(true); 50 51 // Set up the register classes. 52 addRegisterClass(MVT::i32, PPC::GPRCRegisterClass); 53 addRegisterClass(MVT::f32, PPC::F4RCRegisterClass); 54 addRegisterClass(MVT::f64, PPC::F8RCRegisterClass); 55 56 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD 57 setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote); 58 setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand); 59 60 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 61 62 // PowerPC has pre-inc load and store's. 63 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); 64 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); 65 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); 66 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); 67 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); 68 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); 69 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); 70 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); 71 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); 72 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); 73 74 // Shortening conversions involving ppcf128 get expanded (2 regs -> 1 reg) 75 setConvertAction(MVT::ppcf128, MVT::f64, Expand); 76 setConvertAction(MVT::ppcf128, MVT::f32, Expand); 77 // This is used in the ppcf128->int sequence. Note it has different semantics 78 // from FP_ROUND: that rounds to nearest, this rounds to zero. 79 setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom); 80 81 // PowerPC has no SREM/UREM instructions 82 setOperationAction(ISD::SREM, MVT::i32, Expand); 83 setOperationAction(ISD::UREM, MVT::i32, Expand); 84 setOperationAction(ISD::SREM, MVT::i64, Expand); 85 setOperationAction(ISD::UREM, MVT::i64, Expand); 86 87 // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM. 
88 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 89 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 90 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 91 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 92 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 93 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 94 setOperationAction(ISD::UDIVREM, MVT::i64, Expand); 95 setOperationAction(ISD::SDIVREM, MVT::i64, Expand); 96 97 // We don't support sin/cos/sqrt/fmod/pow 98 setOperationAction(ISD::FSIN , MVT::f64, Expand); 99 setOperationAction(ISD::FCOS , MVT::f64, Expand); 100 setOperationAction(ISD::FREM , MVT::f64, Expand); 101 setOperationAction(ISD::FPOW , MVT::f64, Expand); 102 setOperationAction(ISD::FLOG , MVT::f64, Expand); 103 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 104 setOperationAction(ISD::FLOG10,MVT::f64, Expand); 105 setOperationAction(ISD::FEXP ,MVT::f64, Expand); 106 setOperationAction(ISD::FEXP2 ,MVT::f64, Expand); 107 setOperationAction(ISD::FSIN , MVT::f32, Expand); 108 setOperationAction(ISD::FCOS , MVT::f32, Expand); 109 setOperationAction(ISD::FREM , MVT::f32, Expand); 110 setOperationAction(ISD::FPOW , MVT::f32, Expand); 111 setOperationAction(ISD::FLOG , MVT::f32, Expand); 112 setOperationAction(ISD::FLOG2 ,MVT::f32, Expand); 113 setOperationAction(ISD::FLOG10,MVT::f32, Expand); 114 setOperationAction(ISD::FEXP ,MVT::f32, Expand); 115 setOperationAction(ISD::FEXP2 ,MVT::f32, Expand); 116 117 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 118 119 // If we're enabling GP optimizations, use hardware square root 120 if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) { 121 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 122 setOperationAction(ISD::FSQRT, MVT::f32, Expand); 123 } 124 125 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 126 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 127 128 // PowerPC does not have BSWAP, CTPOP or CTTZ 129 setOperationAction(ISD::BSWAP, MVT::i32 , Expand); 130 setOperationAction(ISD::CTPOP, MVT::i32 , Expand); 131 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 132 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); 133 setOperationAction(ISD::CTPOP, MVT::i64 , Expand); 134 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 135 136 // PowerPC does not have ROTR 137 setOperationAction(ISD::ROTR, MVT::i32 , Expand); 138 setOperationAction(ISD::ROTR, MVT::i64 , Expand); 139 140 // PowerPC does not have Select 141 setOperationAction(ISD::SELECT, MVT::i32, Expand); 142 setOperationAction(ISD::SELECT, MVT::i64, Expand); 143 setOperationAction(ISD::SELECT, MVT::f32, Expand); 144 setOperationAction(ISD::SELECT, MVT::f64, Expand); 145 146 // PowerPC wants to turn select_cc of FP into fsel when possible. 147 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 148 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 149 150 // PowerPC wants to optimize integer setcc a bit 151 setOperationAction(ISD::SETCC, MVT::i32, Custom); 152 153 // PowerPC does not have BRCOND which requires SetCC 154 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 155 156 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 157 158 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. 
159 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 160 161 // PowerPC does not have [U|S]INT_TO_FP 162 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); 163 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); 164 165 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand); 166 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand); 167 setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand); 168 setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand); 169 170 // We cannot sextinreg(i1). Expand to shifts. 171 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 172 173 // Support label based line numbers. 174 setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand); 175 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 176 177 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 178 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 179 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 180 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 181 182 183 // We want to legalize GlobalAddress and ConstantPool nodes into the 184 // appropriate instructions to materialize the address. 185 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 186 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 187 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 188 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 189 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 190 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 191 setOperationAction(ISD::ConstantPool, MVT::i64, Custom); 192 setOperationAction(ISD::JumpTable, MVT::i64, Custom); 193 194 // RET must be custom lowered, to meet ABI requirements. 195 setOperationAction(ISD::RET , MVT::Other, Custom); 196 197 // TRAP is legal. 198 setOperationAction(ISD::TRAP, MVT::Other, Legal); 199 200 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 201 setOperationAction(ISD::VASTART , MVT::Other, Custom); 202 203 // VAARG is custom lowered with ELF 32 ABI 204 if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI()) 205 setOperationAction(ISD::VAARG, MVT::Other, Custom); 206 else 207 setOperationAction(ISD::VAARG, MVT::Other, Expand); 208 209 // Use the default implementation. 210 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 211 setOperationAction(ISD::VAEND , MVT::Other, Expand); 212 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 213 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); 214 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 215 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); 216 217 // We want to custom lower some of our intrinsics. 218 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 219 220 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 221 // They also have instructions for converting between i64 and fp. 222 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 223 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); 224 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 225 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); 226 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 227 228 // FIXME: disable this lowered code. This generates 64-bit register values, 229 // and we don't model the fact that the top part is clobbered by calls. We 230 // need to flag these together so that the value isn't live across a call. 
231 //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 232 233 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT 234 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote); 235 } else { 236 // PowerPC does not have FP_TO_UINT on 32-bit implementations. 237 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 238 } 239 240 if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) { 241 // 64-bit PowerPC implementations can support i64 types directly 242 addRegisterClass(MVT::i64, PPC::G8RCRegisterClass); 243 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or 244 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 245 // 64-bit PowerPC wants to expand i128 shifts itself. 246 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); 247 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); 248 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); 249 } else { 250 // 32-bit PowerPC wants to expand i64 shifts itself. 251 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 252 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 253 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 254 } 255 256 if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) { 257 // First set operation action for all vector types to expand. Then we 258 // will selectively turn on ones that can be effectively codegen'd. 259 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 260 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 261 MVT VT = (MVT::SimpleValueType)i; 262 263 // add/sub are legal for all supported vector VT's. 264 setOperationAction(ISD::ADD , VT, Legal); 265 setOperationAction(ISD::SUB , VT, Legal); 266 267 // We promote all shuffles to v16i8. 268 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); 269 AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); 270 271 // We promote all non-typed operations to v4i32. 272 setOperationAction(ISD::AND , VT, Promote); 273 AddPromotedToType (ISD::AND , VT, MVT::v4i32); 274 setOperationAction(ISD::OR , VT, Promote); 275 AddPromotedToType (ISD::OR , VT, MVT::v4i32); 276 setOperationAction(ISD::XOR , VT, Promote); 277 AddPromotedToType (ISD::XOR , VT, MVT::v4i32); 278 setOperationAction(ISD::LOAD , VT, Promote); 279 AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); 280 setOperationAction(ISD::SELECT, VT, Promote); 281 AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); 282 setOperationAction(ISD::STORE, VT, Promote); 283 AddPromotedToType (ISD::STORE, VT, MVT::v4i32); 284 285 // No other operations are legal. 
286 setOperationAction(ISD::MUL , VT, Expand); 287 setOperationAction(ISD::SDIV, VT, Expand); 288 setOperationAction(ISD::SREM, VT, Expand); 289 setOperationAction(ISD::UDIV, VT, Expand); 290 setOperationAction(ISD::UREM, VT, Expand); 291 setOperationAction(ISD::FDIV, VT, Expand); 292 setOperationAction(ISD::FNEG, VT, Expand); 293 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); 294 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); 295 setOperationAction(ISD::BUILD_VECTOR, VT, Expand); 296 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 297 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 298 setOperationAction(ISD::UDIVREM, VT, Expand); 299 setOperationAction(ISD::SDIVREM, VT, Expand); 300 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); 301 setOperationAction(ISD::FPOW, VT, Expand); 302 setOperationAction(ISD::CTPOP, VT, Expand); 303 setOperationAction(ISD::CTLZ, VT, Expand); 304 setOperationAction(ISD::CTTZ, VT, Expand); 305 } 306 307 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle 308 // with merges, splats, etc. 309 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); 310 311 setOperationAction(ISD::AND , MVT::v4i32, Legal); 312 setOperationAction(ISD::OR , MVT::v4i32, Legal); 313 setOperationAction(ISD::XOR , MVT::v4i32, Legal); 314 setOperationAction(ISD::LOAD , MVT::v4i32, Legal); 315 setOperationAction(ISD::SELECT, MVT::v4i32, Expand); 316 setOperationAction(ISD::STORE , MVT::v4i32, Legal); 317 318 addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass); 319 addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass); 320 addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass); 321 addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass); 322 323 setOperationAction(ISD::MUL, MVT::v4f32, Legal); 324 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 325 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 326 setOperationAction(ISD::MUL, MVT::v16i8, Custom); 327 328 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); 329 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); 330 331 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); 332 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); 333 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); 334 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 335 } 336 337 setShiftAmountType(MVT::i32); 338 setSetCCResultContents(ZeroOrOneSetCCResult); 339 340 if (TM.getSubtarget<PPCSubtarget>().isPPC64()) { 341 setStackPointerRegisterToSaveRestore(PPC::X1); 342 setExceptionPointerRegister(PPC::X3); 343 setExceptionSelectorRegister(PPC::X4); 344 } else { 345 setStackPointerRegisterToSaveRestore(PPC::R1); 346 setExceptionPointerRegister(PPC::R3); 347 setExceptionSelectorRegister(PPC::R4); 348 } 349 350 // We have target-specific dag combine patterns for the following nodes: 351 setTargetDAGCombine(ISD::SINT_TO_FP); 352 setTargetDAGCombine(ISD::STORE); 353 setTargetDAGCombine(ISD::BR_CC); 354 setTargetDAGCombine(ISD::BSWAP); 355 356 // Darwin long double math library functions have $LDBL128 appended. 
357 if (TM.getSubtarget<PPCSubtarget>().isDarwin()) { 358 setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128"); 359 setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128"); 360 setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128"); 361 setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128"); 362 setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128"); 363 setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128"); 364 setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128"); 365 setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128"); 366 setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128"); 367 setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128"); 368 } 369 370 computeRegisterProperties(); 371} 372 373/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 374/// function arguments in the caller parameter area. 375unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const { 376 TargetMachine &TM = getTargetMachine(); 377 // Darwin passes everything on 4 byte boundary. 378 if (TM.getSubtarget<PPCSubtarget>().isDarwin()) 379 return 4; 380 // FIXME Elf TBD 381 return 4; 382} 383 384const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { 385 switch (Opcode) { 386 default: return 0; 387 case PPCISD::FSEL: return "PPCISD::FSEL"; 388 case PPCISD::FCFID: return "PPCISD::FCFID"; 389 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; 390 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; 391 case PPCISD::STFIWX: return "PPCISD::STFIWX"; 392 case PPCISD::VMADDFP: return "PPCISD::VMADDFP"; 393 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP"; 394 case PPCISD::VPERM: return "PPCISD::VPERM"; 395 case PPCISD::Hi: return "PPCISD::Hi"; 396 case PPCISD::Lo: return "PPCISD::Lo"; 397 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 398 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 399 case PPCISD::SRL: return "PPCISD::SRL"; 400 case PPCISD::SRA: return "PPCISD::SRA"; 401 case PPCISD::SHL: return "PPCISD::SHL"; 402 case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32"; 403 case PPCISD::STD_32: return "PPCISD::STD_32"; 404 case PPCISD::CALL_ELF: return "PPCISD::CALL_ELF"; 405 case PPCISD::CALL_Macho: return "PPCISD::CALL_Macho"; 406 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 407 case PPCISD::BCTRL_Macho: return "PPCISD::BCTRL_Macho"; 408 case PPCISD::BCTRL_ELF: return "PPCISD::BCTRL_ELF"; 409 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 410 case PPCISD::MFCR: return "PPCISD::MFCR"; 411 case PPCISD::VCMP: return "PPCISD::VCMP"; 412 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 413 case PPCISD::LBRX: return "PPCISD::LBRX"; 414 case PPCISD::STBRX: return "PPCISD::STBRX"; 415 case PPCISD::LARX: return "PPCISD::LARX"; 416 case PPCISD::STCX: return "PPCISD::STCX"; 417 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 418 case PPCISD::MFFS: return "PPCISD::MFFS"; 419 case PPCISD::MTFSB0: return "PPCISD::MTFSB0"; 420 case PPCISD::MTFSB1: return "PPCISD::MTFSB1"; 421 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 422 case PPCISD::MTFSF: return "PPCISD::MTFSF"; 423 case PPCISD::TAILCALL: return "PPCISD::TAILCALL"; 424 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 425 } 426} 427 428 429MVT PPCTargetLowering::getSetCCResultType(const SDValue &) const { 430 return MVT::i32; 431} 432 433 434//===----------------------------------------------------------------------===// 435// Node matching predicates, for use by the tblgen matching code. 436//===----------------------------------------------------------------------===// 437 438/// isFloatingPointZero - Return true if this is 0.0 or -0.0. 
439static bool isFloatingPointZero(SDValue Op) { 440 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 441 return CFP->getValueAPF().isZero(); 442 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 443 // Maybe this has already been legalized into the constant pool? 444 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 445 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 446 return CFP->getValueAPF().isZero(); 447 } 448 return false; 449} 450 451/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 452/// true if Op is undef or if it matches the specified value. 453static bool isConstantOrUndef(SDValue Op, unsigned Val) { 454 return Op.getOpcode() == ISD::UNDEF || 455 cast<ConstantSDNode>(Op)->getValue() == Val; 456} 457 458/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 459/// VPKUHUM instruction. 460bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) { 461 if (!isUnary) { 462 for (unsigned i = 0; i != 16; ++i) 463 if (!isConstantOrUndef(N->getOperand(i), i*2+1)) 464 return false; 465 } else { 466 for (unsigned i = 0; i != 8; ++i) 467 if (!isConstantOrUndef(N->getOperand(i), i*2+1) || 468 !isConstantOrUndef(N->getOperand(i+8), i*2+1)) 469 return false; 470 } 471 return true; 472} 473 474/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 475/// VPKUWUM instruction. 476bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) { 477 if (!isUnary) { 478 for (unsigned i = 0; i != 16; i += 2) 479 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) || 480 !isConstantOrUndef(N->getOperand(i+1), i*2+3)) 481 return false; 482 } else { 483 for (unsigned i = 0; i != 8; i += 2) 484 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) || 485 !isConstantOrUndef(N->getOperand(i+1), i*2+3) || 486 !isConstantOrUndef(N->getOperand(i+8), i*2+2) || 487 !isConstantOrUndef(N->getOperand(i+9), i*2+3)) 488 return false; 489 } 490 return true; 491} 492 493/// isVMerge - Common function, used to match vmrg* shuffles. 494/// 495static bool isVMerge(SDNode *N, unsigned UnitSize, 496 unsigned LHSStart, unsigned RHSStart) { 497 assert(N->getOpcode() == ISD::BUILD_VECTOR && 498 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!"); 499 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 500 "Unsupported merge size!"); 501 502 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 503 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 504 if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j), 505 LHSStart+j+i*UnitSize) || 506 !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j), 507 RHSStart+j+i*UnitSize)) 508 return false; 509 } 510 return true; 511} 512 513/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 514/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 515bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) { 516 if (!isUnary) 517 return isVMerge(N, UnitSize, 8, 24); 518 return isVMerge(N, UnitSize, 8, 8); 519} 520 521/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 522/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). 
523bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) { 524 if (!isUnary) 525 return isVMerge(N, UnitSize, 0, 16); 526 return isVMerge(N, UnitSize, 0, 0); 527} 528 529 530/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 531/// amount, otherwise return -1. 532int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) { 533 assert(N->getOpcode() == ISD::BUILD_VECTOR && 534 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!"); 535 // Find the first non-undef value in the shuffle mask. 536 unsigned i; 537 for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i) 538 /*search*/; 539 540 if (i == 16) return -1; // all undef. 541 542 // Otherwise, check to see if the rest of the elements are consequtively 543 // numbered from this value. 544 unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue(); 545 if (ShiftAmt < i) return -1; 546 ShiftAmt -= i; 547 548 if (!isUnary) { 549 // Check the rest of the elements to see if they are consequtive. 550 for (++i; i != 16; ++i) 551 if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i)) 552 return -1; 553 } else { 554 // Check the rest of the elements to see if they are consequtive. 555 for (++i; i != 16; ++i) 556 if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15)) 557 return -1; 558 } 559 560 return ShiftAmt; 561} 562 563/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 564/// specifies a splat of a single element that is suitable for input to 565/// VSPLTB/VSPLTH/VSPLTW. 566bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) { 567 assert(N->getOpcode() == ISD::BUILD_VECTOR && 568 N->getNumOperands() == 16 && 569 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 570 571 // This is a splat operation if each element of the permute is the same, and 572 // if the value doesn't reference the second vector. 573 unsigned ElementBase = 0; 574 SDValue Elt = N->getOperand(0); 575 if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt)) 576 ElementBase = EltV->getValue(); 577 else 578 return false; // FIXME: Handle UNDEF elements too! 579 580 if (cast<ConstantSDNode>(Elt)->getValue() >= 16) 581 return false; 582 583 // Check that they are consequtive. 584 for (unsigned i = 1; i != EltSize; ++i) { 585 if (!isa<ConstantSDNode>(N->getOperand(i)) || 586 cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase) 587 return false; 588 } 589 590 assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!"); 591 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 592 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 593 assert(isa<ConstantSDNode>(N->getOperand(i)) && 594 "Invalid VECTOR_SHUFFLE mask!"); 595 for (unsigned j = 0; j != EltSize; ++j) 596 if (N->getOperand(i+j) != N->getOperand(j)) 597 return false; 598 } 599 600 return true; 601} 602 603/// isAllNegativeZeroVector - Returns true if all elements of build_vector 604/// are -0.0. 605bool PPC::isAllNegativeZeroVector(SDNode *N) { 606 assert(N->getOpcode() == ISD::BUILD_VECTOR); 607 if (PPC::isSplatShuffleMask(N, N->getNumOperands())) 608 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N)) 609 return CFP->getValueAPF().isNegZero(); 610 return false; 611} 612 613/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 614/// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 
615unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) { 616 assert(isSplatShuffleMask(N, EltSize)); 617 return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize; 618} 619 620/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 621/// by using a vspltis[bhw] instruction of the specified element size, return 622/// the constant being splatted. The ByteSize field indicates the number of 623/// bytes of each element [124] -> [bhw]. 624SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 625 SDValue OpVal(0, 0); 626 627 // If ByteSize of the splat is bigger than the element size of the 628 // build_vector, then we have a case where we are checking for a splat where 629 // multiple elements of the buildvector are folded together into a single 630 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 631 unsigned EltSize = 16/N->getNumOperands(); 632 if (EltSize < ByteSize) { 633 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 634 SDValue UniquedVals[4]; 635 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 636 637 // See if all of the elements in the buildvector agree across. 638 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 639 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 640 // If the element isn't a constant, bail fully out. 641 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 642 643 644 if (UniquedVals[i&(Multiple-1)].getNode() == 0) 645 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 646 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 647 return SDValue(); // no match. 648 } 649 650 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 651 // either constant or undef values that are identical for each chunk. See 652 // if these chunks can form into a larger vspltis*. 653 654 // Check to see if all of the leading entries are either 0 or -1. If 655 // neither, then this won't fit into the immediate field. 656 bool LeadingZero = true; 657 bool LeadingOnes = true; 658 for (unsigned i = 0; i != Multiple-1; ++i) { 659 if (UniquedVals[i].getNode() == 0) continue; // Must have been undefs. 660 661 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue(); 662 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue(); 663 } 664 // Finally, check the least significant entry. 665 if (LeadingZero) { 666 if (UniquedVals[Multiple-1].getNode() == 0) 667 return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef 668 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue(); 669 if (Val < 16) 670 return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4) 671 } 672 if (LeadingOnes) { 673 if (UniquedVals[Multiple-1].getNode() == 0) 674 return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef 675 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended(); 676 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 677 return DAG.getTargetConstant(Val, MVT::i32); 678 } 679 680 return SDValue(); 681 } 682 683 // Check to see if this buildvec has a single non-undef value in its elements. 684 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 685 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 686 if (OpVal.getNode() == 0) 687 OpVal = N->getOperand(i); 688 else if (OpVal != N->getOperand(i)) 689 return SDValue(); 690 } 691 692 if (OpVal.getNode() == 0) return SDValue(); // All UNDEF: use implicit def. 
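  // Illustrative note (editorial, not part of the original source): the
  // size-reduction loop below shrinks a wide splat constant down to the
  // requested element size by repeatedly comparing its halves.  A hedged
  // worked trace, assuming ByteSize == 1 and a v4i32 build_vector whose
  // elements are all 0x01010101:
  //   ValSizeInBytes = 4, Value = 0x01010101
  //   pass 1: halve to 2 bytes, compare 0x0101 with 0x0101 -> equal, keep going
  //   pass 2: halve to 1 byte,  compare   0x01 with   0x01 -> equal, done
  //   sign-extend the remaining byte: MaskVal = 1, which fits the 5-bit
  //   signed immediate, so the whole vector can be built with "vspltisb 1".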
693 694 unsigned ValSizeInBytes = 0; 695 uint64_t Value = 0; 696 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 697 Value = CN->getValue(); 698 ValSizeInBytes = CN->getValueType(0).getSizeInBits()/8; 699 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 700 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 701 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 702 ValSizeInBytes = 4; 703 } 704 705 // If the splat value is larger than the element value, then we can never do 706 // this splat. The only case that we could fit the replicated bits into our 707 // immediate field for would be zero, and we prefer to use vxor for it. 708 if (ValSizeInBytes < ByteSize) return SDValue(); 709 710 // If the element value is larger than the splat value, cut it in half and 711 // check to see if the two halves are equal. Continue doing this until we 712 // get to ByteSize. This allows us to handle 0x01010101 as 0x01. 713 while (ValSizeInBytes > ByteSize) { 714 ValSizeInBytes >>= 1; 715 716 // If the top half equals the bottom half, we're still ok. 717 if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) != 718 (Value & ((1 << (8*ValSizeInBytes))-1))) 719 return SDValue(); 720 } 721 722 // Properly sign extend the value. 723 int ShAmt = (4-ByteSize)*8; 724 int MaskVal = ((int)Value << ShAmt) >> ShAmt; 725 726 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 727 if (MaskVal == 0) return SDValue(); 728 729 // Finally, if this value fits in a 5 bit sext field, return it 730 if (((MaskVal << (32-5)) >> (32-5)) == MaskVal) 731 return DAG.getTargetConstant(MaskVal, MVT::i32); 732 return SDValue(); 733} 734 735//===----------------------------------------------------------------------===// 736// Addressing Mode Selection 737//===----------------------------------------------------------------------===// 738 739/// isIntS16Immediate - This method tests to see if the node is either a 32-bit 740/// or 64-bit immediate, and if the value can be accurately represented as a 741/// sign extension from a 16-bit value. If so, this returns true and the 742/// immediate. 743static bool isIntS16Immediate(SDNode *N, short &Imm) { 744 if (N->getOpcode() != ISD::Constant) 745 return false; 746 747 Imm = (short)cast<ConstantSDNode>(N)->getValue(); 748 if (N->getValueType(0) == MVT::i32) 749 return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue(); 750 else 751 return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue(); 752} 753static bool isIntS16Immediate(SDValue Op, short &Imm) { 754 return isIntS16Immediate(Op.getNode(), Imm); 755} 756 757 758/// SelectAddressRegReg - Given the specified addressed, check to see if it 759/// can be represented as an indexed [r+r] operation. Returns false if it 760/// can be more efficiently represented with [r+imm]. 761bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 762 SDValue &Index, 763 SelectionDAG &DAG) { 764 short imm = 0; 765 if (N.getOpcode() == ISD::ADD) { 766 if (isIntS16Immediate(N.getOperand(1), imm)) 767 return false; // r+i 768 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 769 return false; // r+i 770 771 Base = N.getOperand(0); 772 Index = N.getOperand(1); 773 return true; 774 } else if (N.getOpcode() == ISD::OR) { 775 if (isIntS16Immediate(N.getOperand(1), imm)) 776 return false; // r+i can fold it if we can. 
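    // Illustrative note (editorial, not part of the original source): the OR
    // handling below treats "or" as "add" when the two operands can never
    // have a set bit in the same position.  A hedged example: if
    // ComputeMaskedBits proves the low 16 bits of the LHS are zero (say the
    // LHS came from a left shift by 16) and the RHS is known to fit in the
    // low 16 bits (e.g. a zero-extended halfword, not a constant, since the
    // constant case was already rejected above as r+i), then
    // (LHS | RHS) == (LHS + RHS) because the addition can never carry, and
    // the address can be formed with an indexed [r+r] memory access.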
777 778 // If this is an or of disjoint bitfields, we can codegen this as an add 779 // (for better address arithmetic) if the LHS and RHS of the OR are provably 780 // disjoint. 781 APInt LHSKnownZero, LHSKnownOne; 782 APInt RHSKnownZero, RHSKnownOne; 783 DAG.ComputeMaskedBits(N.getOperand(0), 784 APInt::getAllOnesValue(N.getOperand(0) 785 .getValueSizeInBits()), 786 LHSKnownZero, LHSKnownOne); 787 788 if (LHSKnownZero.getBoolValue()) { 789 DAG.ComputeMaskedBits(N.getOperand(1), 790 APInt::getAllOnesValue(N.getOperand(1) 791 .getValueSizeInBits()), 792 RHSKnownZero, RHSKnownOne); 793 // If all of the bits are known zero on the LHS or RHS, the add won't 794 // carry. 795 if (~(LHSKnownZero | RHSKnownZero) == 0) { 796 Base = N.getOperand(0); 797 Index = N.getOperand(1); 798 return true; 799 } 800 } 801 } 802 803 return false; 804} 805 806/// Returns true if the address N can be represented by a base register plus 807/// a signed 16-bit displacement [r+imm], and if it is not better 808/// represented as reg+reg. 809bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 810 SDValue &Base, SelectionDAG &DAG){ 811 // If this can be more profitably realized as r+r, fail. 812 if (SelectAddressRegReg(N, Disp, Base, DAG)) 813 return false; 814 815 if (N.getOpcode() == ISD::ADD) { 816 short imm = 0; 817 if (isIntS16Immediate(N.getOperand(1), imm)) { 818 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 819 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 820 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 821 } else { 822 Base = N.getOperand(0); 823 } 824 return true; // [r+i] 825 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 826 // Match LOAD (ADD (X, Lo(G))). 827 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue() 828 && "Cannot handle constant offsets yet!"); 829 Disp = N.getOperand(1).getOperand(0); // The global address. 830 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 831 Disp.getOpcode() == ISD::TargetConstantPool || 832 Disp.getOpcode() == ISD::TargetJumpTable); 833 Base = N.getOperand(0); 834 return true; // [&g+r] 835 } 836 } else if (N.getOpcode() == ISD::OR) { 837 short imm = 0; 838 if (isIntS16Immediate(N.getOperand(1), imm)) { 839 // If this is an or of disjoint bitfields, we can codegen this as an add 840 // (for better address arithmetic) if the LHS and RHS of the OR are 841 // provably disjoint. 842 APInt LHSKnownZero, LHSKnownOne; 843 DAG.ComputeMaskedBits(N.getOperand(0), 844 APInt::getAllOnesValue(N.getOperand(0) 845 .getValueSizeInBits()), 846 LHSKnownZero, LHSKnownOne); 847 848 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 849 // If all of the bits are known zero on the LHS or RHS, the add won't 850 // carry. 851 Base = N.getOperand(0); 852 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 853 return true; 854 } 855 } 856 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 857 // Loading from a constant address. 858 859 // If this address fits entirely in a 16-bit sext immediate field, codegen 860 // this as "d, 0" 861 short Imm; 862 if (isIntS16Immediate(CN, Imm)) { 863 Disp = DAG.getTargetConstant(Imm, CN->getValueType(0)); 864 Base = DAG.getRegister(PPC::R0, CN->getValueType(0)); 865 return true; 866 } 867 868 // Handle 32-bit sext immediates with LIS + addr mode. 
869 if (CN->getValueType(0) == MVT::i32 || 870 (int64_t)CN->getValue() == (int)CN->getValue()) { 871 int Addr = (int)CN->getValue(); 872 873 // Otherwise, break this down into an LIS + disp. 874 Disp = DAG.getTargetConstant((short)Addr, MVT::i32); 875 876 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32); 877 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 878 Base = SDValue(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0); 879 return true; 880 } 881 } 882 883 Disp = DAG.getTargetConstant(0, getPointerTy()); 884 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 885 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 886 else 887 Base = N; 888 return true; // [r+0] 889} 890 891/// SelectAddressRegRegOnly - Given the specified addressed, force it to be 892/// represented as an indexed [r+r] operation. 893bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 894 SDValue &Index, 895 SelectionDAG &DAG) { 896 // Check to see if we can easily represent this as an [r+r] address. This 897 // will fail if it thinks that the address is more profitably represented as 898 // reg+imm, e.g. where imm = 0. 899 if (SelectAddressRegReg(N, Base, Index, DAG)) 900 return true; 901 902 // If the operand is an addition, always emit this as [r+r], since this is 903 // better (for code size, and execution, as the memop does the add for free) 904 // than emitting an explicit add. 905 if (N.getOpcode() == ISD::ADD) { 906 Base = N.getOperand(0); 907 Index = N.getOperand(1); 908 return true; 909 } 910 911 // Otherwise, do it the hard way, using R0 as the base register. 912 Base = DAG.getRegister(PPC::R0, N.getValueType()); 913 Index = N; 914 return true; 915} 916 917/// SelectAddressRegImmShift - Returns true if the address N can be 918/// represented by a base register plus a signed 14-bit displacement 919/// [r+imm*4]. Suitable for use by STD and friends. 920bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp, 921 SDValue &Base, 922 SelectionDAG &DAG) { 923 // If this can be more profitably realized as r+r, fail. 924 if (SelectAddressRegReg(N, Disp, Base, DAG)) 925 return false; 926 927 if (N.getOpcode() == ISD::ADD) { 928 short imm = 0; 929 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 930 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 931 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 932 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 933 } else { 934 Base = N.getOperand(0); 935 } 936 return true; // [r+i] 937 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 938 // Match LOAD (ADD (X, Lo(G))). 939 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue() 940 && "Cannot handle constant offsets yet!"); 941 Disp = N.getOperand(1).getOperand(0); // The global address. 942 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 943 Disp.getOpcode() == ISD::TargetConstantPool || 944 Disp.getOpcode() == ISD::TargetJumpTable); 945 Base = N.getOperand(0); 946 return true; // [&g+r] 947 } 948 } else if (N.getOpcode() == ISD::OR) { 949 short imm = 0; 950 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 951 // If this is an or of disjoint bitfields, we can codegen this as an add 952 // (for better address arithmetic) if the LHS and RHS of the OR are 953 // provably disjoint. 
954 APInt LHSKnownZero, LHSKnownOne; 955 DAG.ComputeMaskedBits(N.getOperand(0), 956 APInt::getAllOnesValue(N.getOperand(0) 957 .getValueSizeInBits()), 958 LHSKnownZero, LHSKnownOne); 959 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 960 // If all of the bits are known zero on the LHS or RHS, the add won't 961 // carry. 962 Base = N.getOperand(0); 963 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 964 return true; 965 } 966 } 967 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 968 // Loading from a constant address. Verify low two bits are clear. 969 if ((CN->getValue() & 3) == 0) { 970 // If this address fits entirely in a 14-bit sext immediate field, codegen 971 // this as "d, 0" 972 short Imm; 973 if (isIntS16Immediate(CN, Imm)) { 974 Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy()); 975 Base = DAG.getRegister(PPC::R0, CN->getValueType(0)); 976 return true; 977 } 978 979 // Fold the low-part of 32-bit absolute addresses into addr mode. 980 if (CN->getValueType(0) == MVT::i32 || 981 (int64_t)CN->getValue() == (int)CN->getValue()) { 982 int Addr = (int)CN->getValue(); 983 984 // Otherwise, break this down into an LIS + disp. 985 Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32); 986 987 Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32); 988 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 989 Base = SDValue(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0); 990 return true; 991 } 992 } 993 } 994 995 Disp = DAG.getTargetConstant(0, getPointerTy()); 996 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 997 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 998 else 999 Base = N; 1000 return true; // [r+0] 1001} 1002 1003 1004/// getPreIndexedAddressParts - returns true by value, base pointer and 1005/// offset pointer and addressing mode by reference if the node's address 1006/// can be legally represented as pre-indexed load / store address. 1007bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1008 SDValue &Offset, 1009 ISD::MemIndexedMode &AM, 1010 SelectionDAG &DAG) { 1011 // Disabled by default for now. 1012 if (!EnablePPCPreinc) return false; 1013 1014 SDValue Ptr; 1015 MVT VT; 1016 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1017 Ptr = LD->getBasePtr(); 1018 VT = LD->getMemoryVT(); 1019 1020 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1021 ST = ST; 1022 Ptr = ST->getBasePtr(); 1023 VT = ST->getMemoryVT(); 1024 } else 1025 return false; 1026 1027 // PowerPC doesn't have preinc load/store instructions for vectors. 1028 if (VT.isVector()) 1029 return false; 1030 1031 // TODO: Check reg+reg first. 1032 1033 // LDU/STU use reg+imm*4, others use reg+imm. 1034 if (VT != MVT::i64) { 1035 // reg + imm 1036 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG)) 1037 return false; 1038 } else { 1039 // reg + imm * 4. 1040 if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG)) 1041 return false; 1042 } 1043 1044 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1045 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1046 // sext i32 to i64 when addr mode is r+i. 
1047 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1048 LD->getExtensionType() == ISD::SEXTLOAD && 1049 isa<ConstantSDNode>(Offset)) 1050 return false; 1051 } 1052 1053 AM = ISD::PRE_INC; 1054 return true; 1055} 1056 1057//===----------------------------------------------------------------------===// 1058// LowerOperation implementation 1059//===----------------------------------------------------------------------===// 1060 1061SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 1062 SelectionDAG &DAG) { 1063 MVT PtrVT = Op.getValueType(); 1064 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1065 Constant *C = CP->getConstVal(); 1066 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); 1067 SDValue Zero = DAG.getConstant(0, PtrVT); 1068 1069 const TargetMachine &TM = DAG.getTarget(); 1070 1071 SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero); 1072 SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero); 1073 1074 // If this is a non-darwin platform, we don't support non-static relo models 1075 // yet. 1076 if (TM.getRelocationModel() == Reloc::Static || 1077 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 1078 // Generate non-pic code that has direct accesses to the constant pool. 1079 // The address of the global is just (hi(&g)+lo(&g)). 1080 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1081 } 1082 1083 if (TM.getRelocationModel() == Reloc::PIC_) { 1084 // With PIC, the first instruction is actually "GR+hi(&G)". 1085 Hi = DAG.getNode(ISD::ADD, PtrVT, 1086 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 1087 } 1088 1089 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1090 return Lo; 1091} 1092 1093SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) { 1094 MVT PtrVT = Op.getValueType(); 1095 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 1096 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1097 SDValue Zero = DAG.getConstant(0, PtrVT); 1098 1099 const TargetMachine &TM = DAG.getTarget(); 1100 1101 SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero); 1102 SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero); 1103 1104 // If this is a non-darwin platform, we don't support non-static relo models 1105 // yet. 1106 if (TM.getRelocationModel() == Reloc::Static || 1107 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 1108 // Generate non-pic code that has direct accesses to the constant pool. 1109 // The address of the global is just (hi(&g)+lo(&g)). 1110 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1111 } 1112 1113 if (TM.getRelocationModel() == Reloc::PIC_) { 1114 // With PIC, the first instruction is actually "GR+hi(&G)". 1115 Hi = DAG.getNode(ISD::ADD, PtrVT, 1116 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 1117 } 1118 1119 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1120 return Lo; 1121} 1122 1123SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1124 SelectionDAG &DAG) { 1125 assert(0 && "TLS not implemented for PPC."); 1126 return SDValue(); // Not reached 1127} 1128 1129SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 1130 SelectionDAG &DAG) { 1131 MVT PtrVT = Op.getValueType(); 1132 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1133 GlobalValue *GV = GSDN->getGlobal(); 1134 SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); 1135 // If it's a debug information descriptor, don't mess with it. 
1136 if (DAG.isVerifiedDebugInfoDesc(Op)) 1137 return GA; 1138 SDValue Zero = DAG.getConstant(0, PtrVT); 1139 1140 const TargetMachine &TM = DAG.getTarget(); 1141 1142 SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero); 1143 SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero); 1144 1145 // If this is a non-darwin platform, we don't support non-static relo models 1146 // yet. 1147 if (TM.getRelocationModel() == Reloc::Static || 1148 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 1149 // Generate non-pic code that has direct accesses to globals. 1150 // The address of the global is just (hi(&g)+lo(&g)). 1151 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1152 } 1153 1154 if (TM.getRelocationModel() == Reloc::PIC_) { 1155 // With PIC, the first instruction is actually "GR+hi(&G)". 1156 Hi = DAG.getNode(ISD::ADD, PtrVT, 1157 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi); 1158 } 1159 1160 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo); 1161 1162 if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV)) 1163 return Lo; 1164 1165 // If the global is weak or external, we have to go through the lazy 1166 // resolution stub. 1167 return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0); 1168} 1169 1170SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) { 1171 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1172 1173 // If we're comparing for equality to zero, expose the fact that this is 1174 // implented as a ctlz/srl pair on ppc, so that the dag combiner can 1175 // fold the new nodes. 1176 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1177 if (C->isNullValue() && CC == ISD::SETEQ) { 1178 MVT VT = Op.getOperand(0).getValueType(); 1179 SDValue Zext = Op.getOperand(0); 1180 if (VT.bitsLT(MVT::i32)) { 1181 VT = MVT::i32; 1182 Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0)); 1183 } 1184 unsigned Log2b = Log2_32(VT.getSizeInBits()); 1185 SDValue Clz = DAG.getNode(ISD::CTLZ, VT, Zext); 1186 SDValue Scc = DAG.getNode(ISD::SRL, VT, Clz, 1187 DAG.getConstant(Log2b, MVT::i32)); 1188 return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc); 1189 } 1190 // Leave comparisons against 0 and -1 alone for now, since they're usually 1191 // optimized. FIXME: revisit this when we can custom lower all setcc 1192 // optimizations. 1193 if (C->isAllOnesValue() || C->isNullValue()) 1194 return SDValue(); 1195 } 1196 1197 // If we have an integer seteq/setne, turn it into a compare against zero 1198 // by xor'ing the rhs with the lhs, which is faster than setting a 1199 // condition register, reading it back out, and masking the correct bit. The 1200 // normal approach here uses sub to do this instead of xor. Using xor exposes 1201 // the result to other bit-twiddling opportunities. 
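  // Illustrative note (editorial, not part of the original source): combined
  // with the ctlz/srl lowering earlier in this function, this rewrite lets a
  // 32-bit equality test (a == b) become, roughly,
  //   t  = a ^ b          // zero iff a == b
  //   eq = ctlz(t) >> 5   // cntlzw of 0 is 32, so this is 1 iff t == 0
  // with no condition-register read or mask step required.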
1202 MVT LHSVT = Op.getOperand(0).getValueType(); 1203 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1204 MVT VT = Op.getValueType(); 1205 SDValue Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0), 1206 Op.getOperand(1)); 1207 return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC); 1208 } 1209 return SDValue(); 1210} 1211 1212SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 1213 int VarArgsFrameIndex, 1214 int VarArgsStackOffset, 1215 unsigned VarArgsNumGPR, 1216 unsigned VarArgsNumFPR, 1217 const PPCSubtarget &Subtarget) { 1218 1219 assert(0 && "VAARG in ELF32 ABI not implemented yet!"); 1220 return SDValue(); // Not reached 1221} 1222 1223SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 1224 int VarArgsFrameIndex, 1225 int VarArgsStackOffset, 1226 unsigned VarArgsNumGPR, 1227 unsigned VarArgsNumFPR, 1228 const PPCSubtarget &Subtarget) { 1229 1230 if (Subtarget.isMachoABI()) { 1231 // vastart just stores the address of the VarArgsFrameIndex slot into the 1232 // memory location argument. 1233 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1234 SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1235 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1236 return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); 1237 } 1238 1239 // For ELF 32 ABI we follow the layout of the va_list struct. 1240 // We suppose the given va_list is already allocated. 1241 // 1242 // typedef struct { 1243 // char gpr; /* index into the array of 8 GPRs 1244 // * stored in the register save area 1245 // * gpr=0 corresponds to r3, 1246 // * gpr=1 to r4, etc. 1247 // */ 1248 // char fpr; /* index into the array of 8 FPRs 1249 // * stored in the register save area 1250 // * fpr=0 corresponds to f1, 1251 // * fpr=1 to f2, etc. 
1252 // */ 1253 // char *overflow_arg_area; 1254 // /* location on stack that holds 1255 // * the next overflow argument 1256 // */ 1257 // char *reg_save_area; 1258 // /* where r3:r10 and f1:f8 (if saved) 1259 // * are stored 1260 // */ 1261 // } va_list[1]; 1262 1263 1264 SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8); 1265 SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8); 1266 1267 1268 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1269 1270 SDValue StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT); 1271 SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1272 1273 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 1274 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 1275 1276 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 1277 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 1278 1279 uint64_t FPROffset = 1; 1280 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 1281 1282 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1283 1284 // Store first byte : number of int regs 1285 SDValue firstStore = DAG.getStore(Op.getOperand(0), ArgGPR, 1286 Op.getOperand(1), SV, 0); 1287 uint64_t nextOffset = FPROffset; 1288 SDValue nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1), 1289 ConstFPROffset); 1290 1291 // Store second byte : number of float regs 1292 SDValue secondStore = 1293 DAG.getStore(firstStore, ArgFPR, nextPtr, SV, nextOffset); 1294 nextOffset += StackOffset; 1295 nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset); 1296 1297 // Store second word : arguments given on stack 1298 SDValue thirdStore = 1299 DAG.getStore(secondStore, StackOffsetFI, nextPtr, SV, nextOffset); 1300 nextOffset += FrameOffset; 1301 nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset); 1302 1303 // Store third word : arguments given in registers 1304 return DAG.getStore(thirdStore, FR, nextPtr, SV, nextOffset); 1305 1306} 1307 1308#include "PPCGenCallingConv.inc" 1309 1310/// GetFPR - Get the set of FP registers that should be allocated for arguments, 1311/// depending on which subtarget is selected. 1312static const unsigned *GetFPR(const PPCSubtarget &Subtarget) { 1313 if (Subtarget.isMachoABI()) { 1314 static const unsigned FPR[] = { 1315 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1316 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1317 }; 1318 return FPR; 1319 } 1320 1321 1322 static const unsigned FPR[] = { 1323 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1324 PPC::F8 1325 }; 1326 return FPR; 1327} 1328 1329/// CalculateStackSlotSize - Calculates the size reserved for this argument on 1330/// the stack. 1331static unsigned CalculateStackSlotSize(SDValue Arg, SDValue Flag, 1332 bool isVarArg, unsigned PtrByteSize) { 1333 MVT ArgVT = Arg.getValueType(); 1334 ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Flag)->getArgFlags(); 1335 unsigned ArgSize =ArgVT.getSizeInBits()/8; 1336 if (Flags.isByVal()) 1337 ArgSize = Flags.getByValSize(); 1338 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1339 1340 return ArgSize; 1341} 1342 1343SDValue 1344PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, 1345 SelectionDAG &DAG, 1346 int &VarArgsFrameIndex, 1347 int &VarArgsStackOffset, 1348 unsigned &VarArgsNumGPR, 1349 unsigned &VarArgsNumFPR, 1350 const PPCSubtarget &Subtarget) { 1351 // TODO: add description of PPC stack frame format, or at least some docs. 
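  // Editorial sketch for the TODO above (hedged summary, not from the
  // original source): on entry the caller has laid out, starting at the
  // stack pointer, a fixed linkage area (back chain plus link/condition
  // register save slots and reserved words, depending on the ABI; its size
  // comes from PPCFrameInfo::getLinkageSize), followed by the parameter save
  // area that ArgOffset indexes into below.  The first few arguments
  // normally arrive in R3-R10 (X3-X10 on PPC64), F1-F13 (F1-F8 for ELF32)
  // and V2-V13, but integer and FP arguments still reserve their slots in
  // the parameter area under the Mach-O ABI.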
1352 // 1353 MachineFunction &MF = DAG.getMachineFunction(); 1354 MachineFrameInfo *MFI = MF.getFrameInfo(); 1355 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1356 SmallVector<SDValue, 8> ArgValues; 1357 SDValue Root = Op.getOperand(0); 1358 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 1359 1360 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1361 bool isPPC64 = PtrVT == MVT::i64; 1362 bool isMachoABI = Subtarget.isMachoABI(); 1363 bool isELF32_ABI = Subtarget.isELF32_ABI(); 1364 // Potential tail calls could cause overwriting of argument stack slots. 1365 unsigned CC = MF.getFunction()->getCallingConv(); 1366 bool isImmutable = !(PerformTailCallOpt && (CC==CallingConv::Fast)); 1367 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1368 1369 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI); 1370 // Area that is at least reserved in caller of this function. 1371 unsigned MinReservedArea = ArgOffset; 1372 1373 static const unsigned GPR_32[] = { // 32-bit registers. 1374 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1375 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1376 }; 1377 static const unsigned GPR_64[] = { // 64-bit registers. 1378 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1379 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1380 }; 1381 1382 static const unsigned *FPR = GetFPR(Subtarget); 1383 1384 static const unsigned VR[] = { 1385 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1386 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1387 }; 1388 1389 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 1390 const unsigned Num_FPR_Regs = isMachoABI ? 13 : 8; 1391 const unsigned Num_VR_Regs = array_lengthof( VR); 1392 1393 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1394 1395 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1396 1397 // In 32-bit non-varargs functions, the stack space for vectors is after the 1398 // stack space for non-vectors. We do not use this space unless we have 1399 // too many vectors to fit in registers, something that only occurs in 1400 // constructed examples:), but we have to walk the arglist to figure 1401 // that out...for the pathological case, compute VecArgOffset as the 1402 // start of the vector parameter area. Computing VecArgOffset is the 1403 // entire point of the following loop. 1404 // Altivec is not mentioned in the ppc32 Elf Supplement, so I'm not trying 1405 // to handle Elf here. 1406 unsigned VecArgOffset = ArgOffset; 1407 if (!isVarArg && !isPPC64) { 1408 for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues()-1; ArgNo != e; 1409 ++ArgNo) { 1410 MVT ObjectVT = Op.getValue(ArgNo).getValueType(); 1411 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 1412 ISD::ArgFlagsTy Flags = 1413 cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags(); 1414 1415 if (Flags.isByVal()) { 1416 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 1417 ObjSize = Flags.getByValSize(); 1418 unsigned ArgSize = 1419 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1420 VecArgOffset += ArgSize; 1421 continue; 1422 } 1423 1424 switch(ObjectVT.getSimpleVT()) { 1425 default: assert(0 && "Unhandled argument type!"); 1426 case MVT::i32: 1427 case MVT::f32: 1428 VecArgOffset += isPPC64 ? 8 : 4; 1429 break; 1430 case MVT::i64: // PPC64 1431 case MVT::f64: 1432 VecArgOffset += 8; 1433 break; 1434 case MVT::v4f32: 1435 case MVT::v4i32: 1436 case MVT::v8i16: 1437 case MVT::v16i8: 1438 // Nothing to do, we're only looking at Nonvector args here. 
1439 break; 1440 } 1441 } 1442 } 1443 // We've found where the vector parameter area in memory is. Skip the 1444 // first 12 parameters; these don't use that memory. 1445 VecArgOffset = ((VecArgOffset+15)/16)*16; 1446 VecArgOffset += 12*16; 1447 1448 // Add DAG nodes to load the arguments or copy them out of registers. On 1449 // entry to a function on PPC, the arguments start after the linkage area, 1450 // although the first ones are often in registers. 1451 // 1452 // In the ELF 32 ABI, GPRs and stack are double word align: an argument 1453 // represented with two words (long long or double) must be copied to an 1454 // even GPR_idx value or to an even ArgOffset value. 1455 1456 SmallVector<SDValue, 8> MemOps; 1457 unsigned nAltivecParamsAtEnd = 0; 1458 for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues() - 1; 1459 ArgNo != e; ++ArgNo) { 1460 SDValue ArgVal; 1461 bool needsLoad = false; 1462 MVT ObjectVT = Op.getValue(ArgNo).getValueType(); 1463 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 1464 unsigned ArgSize = ObjSize; 1465 ISD::ArgFlagsTy Flags = 1466 cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags(); 1467 // See if next argument requires stack alignment in ELF 1468 bool Align = Flags.isSplit(); 1469 1470 unsigned CurArgOffset = ArgOffset; 1471 1472 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 1473 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 1474 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 1475 if (isVarArg || isPPC64) { 1476 MinReservedArea = ((MinReservedArea+15)/16)*16; 1477 MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo), 1478 Op.getOperand(ArgNo+3), 1479 isVarArg, 1480 PtrByteSize); 1481 } else nAltivecParamsAtEnd++; 1482 } else 1483 // Calculate min reserved area. 1484 MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo), 1485 Op.getOperand(ArgNo+3), 1486 isVarArg, 1487 PtrByteSize); 1488 1489 // FIXME alignment for ELF may not be right 1490 // FIXME the codegen can be much improved in some cases. 1491 // We do not have to keep everything in memory. 1492 if (Flags.isByVal()) { 1493 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 1494 ObjSize = Flags.getByValSize(); 1495 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1496 // Double word align in ELF 1497 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2); 1498 // Objects of size 1 and 2 are right justified, everything else is 1499 // left justified. This means the memory address is adjusted forwards. 1500 if (ObjSize==1 || ObjSize==2) { 1501 CurArgOffset = CurArgOffset + (4 - ObjSize); 1502 } 1503 // The value of the object is its address. 1504 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset); 1505 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1506 ArgValues.push_back(FIN); 1507 if (ObjSize==1 || ObjSize==2) { 1508 if (GPR_idx != Num_GPR_Regs) { 1509 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1510 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1511 SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1512 SDValue Store = DAG.getTruncStore(Val.getValue(1), Val, FIN, 1513 NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 ); 1514 MemOps.push_back(Store); 1515 ++GPR_idx; 1516 if (isMachoABI) ArgOffset += PtrByteSize; 1517 } else { 1518 ArgOffset += PtrByteSize; 1519 } 1520 continue; 1521 } 1522 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 1523 // Store whatever pieces of the object are in registers 1524 // to memory. ArgVal will be address of the beginning of 1525 // the object. 
1526 if (GPR_idx != Num_GPR_Regs) { 1527 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1528 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1529 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset); 1530 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1531 SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1532 SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1533 MemOps.push_back(Store); 1534 ++GPR_idx; 1535 if (isMachoABI) ArgOffset += PtrByteSize; 1536 } else { 1537 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 1538 break; 1539 } 1540 } 1541 continue; 1542 } 1543 1544 switch (ObjectVT.getSimpleVT()) { 1545 default: assert(0 && "Unhandled argument type!"); 1546 case MVT::i32: 1547 if (!isPPC64) { 1548 // Double word align in ELF 1549 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2); 1550 1551 if (GPR_idx != Num_GPR_Regs) { 1552 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1553 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1554 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32); 1555 ++GPR_idx; 1556 } else { 1557 needsLoad = true; 1558 ArgSize = PtrByteSize; 1559 } 1560 // Stack align in ELF 1561 if (needsLoad && Align && isELF32_ABI) 1562 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1563 // All int arguments reserve stack space in Macho ABI. 1564 if (isMachoABI || needsLoad) ArgOffset += PtrByteSize; 1565 break; 1566 } 1567 // FALLTHROUGH 1568 case MVT::i64: // PPC64 1569 if (GPR_idx != Num_GPR_Regs) { 1570 unsigned VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 1571 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1572 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64); 1573 1574 if (ObjectVT == MVT::i32) { 1575 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 1576 // value to MVT::i64 and then truncate to the correct register size. 1577 if (Flags.isSExt()) 1578 ArgVal = DAG.getNode(ISD::AssertSext, MVT::i64, ArgVal, 1579 DAG.getValueType(ObjectVT)); 1580 else if (Flags.isZExt()) 1581 ArgVal = DAG.getNode(ISD::AssertZext, MVT::i64, ArgVal, 1582 DAG.getValueType(ObjectVT)); 1583 1584 ArgVal = DAG.getNode(ISD::TRUNCATE, MVT::i32, ArgVal); 1585 } 1586 1587 ++GPR_idx; 1588 } else { 1589 needsLoad = true; 1590 ArgSize = PtrByteSize; 1591 } 1592 // All int arguments reserve stack space in Macho ABI. 1593 if (isMachoABI || needsLoad) ArgOffset += 8; 1594 break; 1595 1596 case MVT::f32: 1597 case MVT::f64: 1598 // Every 4 bytes of argument space consumes one of the GPRs available for 1599 // argument passing. 1600 if (GPR_idx != Num_GPR_Regs && isMachoABI) { 1601 ++GPR_idx; 1602 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 1603 ++GPR_idx; 1604 } 1605 if (FPR_idx != Num_FPR_Regs) { 1606 unsigned VReg; 1607 if (ObjectVT == MVT::f32) 1608 VReg = RegInfo.createVirtualRegister(&PPC::F4RCRegClass); 1609 else 1610 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 1611 RegInfo.addLiveIn(FPR[FPR_idx], VReg); 1612 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1613 ++FPR_idx; 1614 } else { 1615 needsLoad = true; 1616 } 1617 1618 // Stack align in ELF 1619 if (needsLoad && Align && isELF32_ABI) 1620 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 1621 // All FP arguments reserve stack space in Macho ABI. 1622 if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize; 1623 break; 1624 case MVT::v4f32: 1625 case MVT::v4i32: 1626 case MVT::v8i16: 1627 case MVT::v16i8: 1628 // Note that vector arguments in registers don't reserve stack space, 1629 // except in varargs functions. 
1630 if (VR_idx != Num_VR_Regs) { 1631 unsigned VReg = RegInfo.createVirtualRegister(&PPC::VRRCRegClass); 1632 RegInfo.addLiveIn(VR[VR_idx], VReg); 1633 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT); 1634 if (isVarArg) { 1635 while ((ArgOffset % 16) != 0) { 1636 ArgOffset += PtrByteSize; 1637 if (GPR_idx != Num_GPR_Regs) 1638 GPR_idx++; 1639 } 1640 ArgOffset += 16; 1641 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); 1642 } 1643 ++VR_idx; 1644 } else { 1645 if (!isVarArg && !isPPC64) { 1646 // Vectors go after all the nonvectors. 1647 CurArgOffset = VecArgOffset; 1648 VecArgOffset += 16; 1649 } else { 1650 // Vectors are aligned. 1651 ArgOffset = ((ArgOffset+15)/16)*16; 1652 CurArgOffset = ArgOffset; 1653 ArgOffset += 16; 1654 } 1655 needsLoad = true; 1656 } 1657 break; 1658 } 1659 1660 // We need to load the argument to a virtual register if we determined above 1661 // that we ran out of physical registers of the appropriate type. 1662 if (needsLoad) { 1663 int FI = MFI->CreateFixedObject(ObjSize, 1664 CurArgOffset + (ArgSize - ObjSize), 1665 isImmutable); 1666 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1667 ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); 1668 } 1669 1670 ArgValues.push_back(ArgVal); 1671 } 1672 1673 // Set the size that is at least reserved in caller of this function. Tail 1674 // call optimized function's reserved stack space needs to be aligned so that 1675 // taking the difference between two stack areas will result in an aligned 1676 // stack. 1677 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1678 // Add the Altivec parameters at the end, if needed. 1679 if (nAltivecParamsAtEnd) { 1680 MinReservedArea = ((MinReservedArea+15)/16)*16; 1681 MinReservedArea += 16*nAltivecParamsAtEnd; 1682 } 1683 MinReservedArea = 1684 std::max(MinReservedArea, 1685 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI)); 1686 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()-> 1687 getStackAlignment(); 1688 unsigned AlignMask = TargetAlign-1; 1689 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 1690 FI->setMinReservedArea(MinReservedArea); 1691 1692 // If the function takes variable number of arguments, make a frame index for 1693 // the start of the first vararg value... for expansion of llvm.va_start. 1694 if (isVarArg) { 1695 1696 int depth; 1697 if (isELF32_ABI) { 1698 VarArgsNumGPR = GPR_idx; 1699 VarArgsNumFPR = FPR_idx; 1700 1701 // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame 1702 // pointer. 1703 depth = -(Num_GPR_Regs * PtrVT.getSizeInBits()/8 + 1704 Num_FPR_Regs * MVT(MVT::f64).getSizeInBits()/8 + 1705 PtrVT.getSizeInBits()/8); 1706 1707 VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 1708 ArgOffset); 1709 1710 } 1711 else 1712 depth = ArgOffset; 1713 1714 VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 1715 depth); 1716 SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1717 1718 // In ELF 32 ABI, the fixed integer arguments of a variadic function are 1719 // stored to the VarArgsFrameIndex on the stack. 
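    // (With the ELF32 values used above -- 8 GPRs at 4 bytes, 8 FPRs at
    // 8 bytes, plus one pointer-sized slot for a possible frame pointer --
    // the depth computed for VarArgsFrameIndex is -(8*4 + 8*8 + 4) = -100,
    // a negative fixed offset, unlike the positive ArgOffset used otherwise.)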
1720 if (isELF32_ABI) { 1721 for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) { 1722 SDValue Val = DAG.getRegister(GPR[GPR_idx], PtrVT); 1723 SDValue Store = DAG.getStore(Root, Val, FIN, NULL, 0); 1724 MemOps.push_back(Store); 1725 // Increment the address by four for the next argument to store 1726 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 1727 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1728 } 1729 } 1730 1731 // If this function is vararg, store any remaining integer argument regs 1732 // to their spots on the stack so that they may be loaded by deferencing the 1733 // result of va_next. 1734 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 1735 unsigned VReg; 1736 if (isPPC64) 1737 VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); 1738 else 1739 VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 1740 1741 RegInfo.addLiveIn(GPR[GPR_idx], VReg); 1742 SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT); 1743 SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1744 MemOps.push_back(Store); 1745 // Increment the address by four for the next argument to store 1746 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 1747 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1748 } 1749 1750 // In ELF 32 ABI, the double arguments are stored to the VarArgsFrameIndex 1751 // on the stack. 1752 if (isELF32_ABI) { 1753 for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) { 1754 SDValue Val = DAG.getRegister(FPR[FPR_idx], MVT::f64); 1755 SDValue Store = DAG.getStore(Root, Val, FIN, NULL, 0); 1756 MemOps.push_back(Store); 1757 // Increment the address by eight for the next argument to store 1758 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, 1759 PtrVT); 1760 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1761 } 1762 1763 for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) { 1764 unsigned VReg; 1765 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 1766 1767 RegInfo.addLiveIn(FPR[FPR_idx], VReg); 1768 SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::f64); 1769 SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); 1770 MemOps.push_back(Store); 1771 // Increment the address by eight for the next argument to store 1772 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, 1773 PtrVT); 1774 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); 1775 } 1776 } 1777 } 1778 1779 if (!MemOps.empty()) 1780 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size()); 1781 1782 ArgValues.push_back(Root); 1783 1784 // Return the new list of results. 1785 return DAG.getMergeValues(Op.getNode()->getVTList(), &ArgValues[0], 1786 ArgValues.size()); 1787} 1788 1789/// CalculateParameterAndLinkageAreaSize - Get the size of the paramter plus 1790/// linkage area. 1791static unsigned 1792CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, 1793 bool isPPC64, 1794 bool isMachoABI, 1795 bool isVarArg, 1796 unsigned CC, 1797 SDValue Call, 1798 unsigned &nAltivecParamsAtEnd) { 1799 // Count how many bytes are to be pushed on the stack, including the linkage 1800 // area, and parameter passing area. We start with 24/48 bytes, which is 1801 // prereserved space for [SP][CR][LR][3 x unused]. 1802 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI); 1803 unsigned NumOps = (Call.getNumOperands() - 5) / 2; 1804 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1805 1806 // Add up all the space actually used. 
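  // Each (Arg, Flags) operand pair contributes CalculateStackSlotSize bytes:
  // the value size (or the byval size) rounded up to a multiple of the
  // pointer size, e.g. a 7-byte byval argument reserves 8 bytes on ppc32.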
1807 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 1808 // they all go in registers, but we must reserve stack space for them for 1809 // possible use by the caller. In varargs or 64-bit calls, parameters are 1810 // assigned stack space in order, with padding so Altivec parameters are 1811 // 16-byte aligned. 1812 nAltivecParamsAtEnd = 0; 1813 for (unsigned i = 0; i != NumOps; ++i) { 1814 SDValue Arg = Call.getOperand(5+2*i); 1815 SDValue Flag = Call.getOperand(5+2*i+1); 1816 MVT ArgVT = Arg.getValueType(); 1817 // Varargs Altivec parameters are padded to a 16 byte boundary. 1818 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 1819 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 1820 if (!isVarArg && !isPPC64) { 1821 // Non-varargs Altivec parameters go after all the non-Altivec 1822 // parameters; handle those later so we know how much padding we need. 1823 nAltivecParamsAtEnd++; 1824 continue; 1825 } 1826 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 1827 NumBytes = ((NumBytes+15)/16)*16; 1828 } 1829 NumBytes += CalculateStackSlotSize(Arg, Flag, isVarArg, PtrByteSize); 1830 } 1831 1832 // Allow for Altivec parameters at the end, if needed. 1833 if (nAltivecParamsAtEnd) { 1834 NumBytes = ((NumBytes+15)/16)*16; 1835 NumBytes += 16*nAltivecParamsAtEnd; 1836 } 1837 1838 // The prolog code of the callee may store up to 8 GPR argument registers to 1839 // the stack, allowing va_start to index over them in memory if its varargs. 1840 // Because we cannot tell if this is needed on the caller side, we have to 1841 // conservatively assume that it is needed. As such, make sure we have at 1842 // least enough stack space for the caller to store the 8 GPRs. 1843 NumBytes = std::max(NumBytes, 1844 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI)); 1845 1846 // Tail call needs the stack to be aligned. 1847 if (CC==CallingConv::Fast && PerformTailCallOpt) { 1848 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()-> 1849 getStackAlignment(); 1850 unsigned AlignMask = TargetAlign-1; 1851 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 1852 } 1853 1854 return NumBytes; 1855} 1856 1857/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 1858/// adjusted to accomodate the arguments for the tailcall. 1859static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall, 1860 unsigned ParamSize) { 1861 1862 if (!IsTailCall) return 0; 1863 1864 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 1865 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 1866 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 1867 // Remember only if the new adjustement is bigger. 1868 if (SPDiff < FI->getTailCallSPDelta()) 1869 FI->setTailCallSPDelta(SPDiff); 1870 1871 return SPDiff; 1872} 1873 1874/// IsEligibleForTailCallElimination - Check to see whether the next instruction 1875/// following the call is a return. A function is eligible if caller/callee 1876/// calling conventions match, currently only fastcc supports tail calls, and 1877/// the function CALL is immediatly followed by a RET. 1878bool 1879PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Call, 1880 SDValue Ret, 1881 SelectionDAG& DAG) const { 1882 // Variable argument functions are not supported. 
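  // In addition, the call must be immediately followed by a return that
  // satisfies CheckTailCallReturnConstraints, both caller and callee must use
  // fastcc, no argument may be byval, and under PIC only callees known to be
  // local (globals with hidden or protected visibility) are accepted.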
1883 if (!PerformTailCallOpt || 1884 cast<ConstantSDNode>(Call.getOperand(2))->getValue() != 0) return false; 1885 1886 if (CheckTailCallReturnConstraints(Call, Ret)) { 1887 MachineFunction &MF = DAG.getMachineFunction(); 1888 unsigned CallerCC = MF.getFunction()->getCallingConv(); 1889 unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue(); 1890 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 1891 // Functions containing by val parameters are not supported. 1892 for (unsigned i = 0; i != ((Call.getNumOperands()-5)/2); i++) { 1893 ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Call.getOperand(5+2*i+1)) 1894 ->getArgFlags(); 1895 if (Flags.isByVal()) return false; 1896 } 1897 1898 SDValue Callee = Call.getOperand(4); 1899 // Non PIC/GOT tail calls are supported. 1900 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 1901 return true; 1902 1903 // At the moment we can only do local tail calls (in same module, hidden 1904 // or protected) if we are generating PIC. 1905 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1906 return G->getGlobal()->hasHiddenVisibility() 1907 || G->getGlobal()->hasProtectedVisibility(); 1908 } 1909 } 1910 1911 return false; 1912} 1913 1914/// isCallCompatibleAddress - Return the immediate to use if the specified 1915/// 32-bit value is representable in the immediate field of a BxA instruction. 1916static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 1917 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 1918 if (!C) return 0; 1919 1920 int Addr = C->getValue(); 1921 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 1922 (Addr << 6 >> 6) != Addr) 1923 return 0; // Top 6 bits have to be sext of immediate. 1924 1925 return DAG.getConstant((int)C->getValue() >> 2, 1926 DAG.getTargetLoweringInfo().getPointerTy()).getNode(); 1927} 1928 1929namespace { 1930 1931struct TailCallArgumentInfo { 1932 SDValue Arg; 1933 SDValue FrameIdxOp; 1934 int FrameIdx; 1935 1936 TailCallArgumentInfo() : FrameIdx(0) {} 1937}; 1938 1939} 1940 1941/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 1942static void 1943StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 1944 SDValue Chain, 1945 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs, 1946 SmallVector<SDValue, 8> &MemOpChains) { 1947 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 1948 SDValue Arg = TailCallArgs[i].Arg; 1949 SDValue FIN = TailCallArgs[i].FrameIdxOp; 1950 int FI = TailCallArgs[i].FrameIdx; 1951 // Store relative to framepointer. 1952 MemOpChains.push_back(DAG.getStore(Chain, Arg, FIN, 1953 PseudoSourceValue::getFixedStack(FI), 1954 0)); 1955 } 1956} 1957 1958/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 1959/// the appropriate stack slot for the tail call optimized function call. 1960static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 1961 MachineFunction &MF, 1962 SDValue Chain, 1963 SDValue OldRetAddr, 1964 SDValue OldFP, 1965 int SPDiff, 1966 bool isPPC64, 1967 bool isMachoABI) { 1968 if (SPDiff) { 1969 // Calculate the new stack slot for the return address. 1970 int SlotSize = isPPC64 ? 
8 : 4; 1971 int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64, 1972 isMachoABI); 1973 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 1974 NewRetAddrLoc); 1975 int NewFPLoc = SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64, 1976 isMachoABI); 1977 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc); 1978 1979 MVT VT = isPPC64 ? MVT::i64 : MVT::i32; 1980 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 1981 Chain = DAG.getStore(Chain, OldRetAddr, NewRetAddrFrIdx, 1982 PseudoSourceValue::getFixedStack(NewRetAddr), 0); 1983 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 1984 Chain = DAG.getStore(Chain, OldFP, NewFramePtrIdx, 1985 PseudoSourceValue::getFixedStack(NewFPIdx), 0); 1986 } 1987 return Chain; 1988} 1989 1990/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 1991/// the position of the argument. 1992static void 1993CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 1994 SDValue Arg, int SPDiff, unsigned ArgOffset, 1995 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { 1996 int Offset = ArgOffset + SPDiff; 1997 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 1998 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); 1999 MVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2000 SDValue FIN = DAG.getFrameIndex(FI, VT); 2001 TailCallArgumentInfo Info; 2002 Info.Arg = Arg; 2003 Info.FrameIdxOp = FIN; 2004 Info.FrameIdx = FI; 2005 TailCallArguments.push_back(Info); 2006} 2007 2008/// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 2009/// stack slot. Returns the chain as result and the loaded frame pointers in 2010/// LROpOut/FPOpout. Used when tail calling. 2011SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 2012 int SPDiff, 2013 SDValue Chain, 2014 SDValue &LROpOut, 2015 SDValue &FPOpOut) { 2016 if (SPDiff) { 2017 // Load the LR and FP stack slot for later adjusting. 2018 MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; 2019 LROpOut = getReturnAddrFrameIndex(DAG); 2020 LROpOut = DAG.getLoad(VT, Chain, LROpOut, NULL, 0); 2021 Chain = SDValue(LROpOut.getNode(), 1); 2022 FPOpOut = getFramePointerFrameIndex(DAG); 2023 FPOpOut = DAG.getLoad(VT, Chain, FPOpOut, NULL, 0); 2024 Chain = SDValue(FPOpOut.getNode(), 1); 2025 } 2026 return Chain; 2027} 2028 2029/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 2030/// by "Src" to address "Dst" of size "Size". Alignment information is 2031/// specified by the specific parameter attribute. The copy will be passed as 2032/// a byval function parameter. 2033/// Sometimes what we are copying is the end of a larger object, the part that 2034/// does not fit in registers. 2035static SDValue 2036CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 2037 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 2038 unsigned Size) { 2039 SDValue SizeNode = DAG.getConstant(Size, MVT::i32); 2040 return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(), false, 2041 NULL, 0, NULL, 0); 2042} 2043 2044/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 2045/// tail calls. 
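/// For a normal call the argument is simply stored through the PtrOff address
/// computed by the caller (recomputed from StackPtr + ArgOffset for vector
/// arguments); for a tail call the destination slot is only recorded via
/// CalculateTailCallArgDest and the store is emitted later by
/// StoreTailCallArgumentsToStackSlot.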
2046static void 2047LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 2048 SDValue Arg, SDValue PtrOff, int SPDiff, 2049 unsigned ArgOffset, bool isPPC64, bool isTailCall, 2050 bool isVector, SmallVector<SDValue, 8> &MemOpChains, 2051 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { 2052 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2053 if (!isTailCall) { 2054 if (isVector) { 2055 SDValue StackPtr; 2056 if (isPPC64) 2057 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 2058 else 2059 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2060 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, 2061 DAG.getConstant(ArgOffset, PtrVT)); 2062 } 2063 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 2064 // Calculate and remember argument location. 2065 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 2066 TailCallArguments); 2067} 2068 2069SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG, 2070 const PPCSubtarget &Subtarget, 2071 TargetMachine &TM) { 2072 SDValue Chain = Op.getOperand(0); 2073 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; 2074 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 2075 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 && 2076 CC == CallingConv::Fast && PerformTailCallOpt; 2077 SDValue Callee = Op.getOperand(4); 2078 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 2079 2080 bool isMachoABI = Subtarget.isMachoABI(); 2081 bool isELF32_ABI = Subtarget.isELF32_ABI(); 2082 2083 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2084 bool isPPC64 = PtrVT == MVT::i64; 2085 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2086 2087 MachineFunction &MF = DAG.getMachineFunction(); 2088 2089 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in 2090 // SelectExpr to use to put the arguments in the appropriate registers. 2091 std::vector<SDValue> args_to_use; 2092 2093 // Mark this function as potentially containing a function that contains a 2094 // tail call. As a consequence the frame pointer will be used for dynamicalloc 2095 // and restoring the callers stack pointer in this functions epilog. This is 2096 // done because by tail calling the called function might overwrite the value 2097 // in this function's (MF) stack pointer stack slot 0(SP). 2098 if (PerformTailCallOpt && CC==CallingConv::Fast) 2099 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 2100 2101 unsigned nAltivecParamsAtEnd = 0; 2102 2103 // Count how many bytes are to be pushed on the stack, including the linkage 2104 // area, and parameter passing area. We start with 24/48 bytes, which is 2105 // prereserved space for [SP][CR][LR][3 x unused]. 2106 unsigned NumBytes = 2107 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isMachoABI, isVarArg, CC, 2108 Op, nAltivecParamsAtEnd); 2109 2110 // Calculate by how many bytes the stack has to be adjusted in case of tail 2111 // call optimization. 2112 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 2113 2114 // Adjust the stack pointer for the new arguments... 2115 // These operations are automatically eliminated by the prolog/epilog pass 2116 Chain = DAG.getCALLSEQ_START(Chain, 2117 DAG.getConstant(NumBytes, PtrVT)); 2118 SDValue CallSeqStart = Chain; 2119 2120 // Load the return address and frame pointer so it can be move somewhere else 2121 // later. 
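  // (EmitTailCallLoadFPAndRetAddr only emits these loads when SPDiff is
  // non-zero, so for ordinary calls this is a no-op.)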
2122 SDValue LROp, FPOp; 2123 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp); 2124 2125 // Set up a copy of the stack pointer for use loading and storing any 2126 // arguments that may not fit in the registers available for argument 2127 // passing. 2128 SDValue StackPtr; 2129 if (isPPC64) 2130 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 2131 else 2132 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2133 2134 // Figure out which arguments are going to go in registers, and which in 2135 // memory. Also, if this is a vararg function, floating point operations 2136 // must be stored to our stack, and loaded into integer regs as well, if 2137 // any integer regs are available for argument passing. 2138 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI); 2139 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2140 2141 static const unsigned GPR_32[] = { // 32-bit registers. 2142 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2143 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2144 }; 2145 static const unsigned GPR_64[] = { // 64-bit registers. 2146 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2147 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2148 }; 2149 static const unsigned *FPR = GetFPR(Subtarget); 2150 2151 static const unsigned VR[] = { 2152 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2153 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2154 }; 2155 const unsigned NumGPRs = array_lengthof(GPR_32); 2156 const unsigned NumFPRs = isMachoABI ? 13 : 8; 2157 const unsigned NumVRs = array_lengthof( VR); 2158 2159 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 2160 2161 std::vector<std::pair<unsigned, SDValue> > RegsToPass; 2162 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 2163 2164 SmallVector<SDValue, 8> MemOpChains; 2165 for (unsigned i = 0; i != NumOps; ++i) { 2166 bool inMem = false; 2167 SDValue Arg = Op.getOperand(5+2*i); 2168 ISD::ArgFlagsTy Flags = 2169 cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); 2170 // See if next argument requires stack alignment in ELF 2171 bool Align = Flags.isSplit(); 2172 2173 // PtrOff will be used to store the current argument to the stack if a 2174 // register cannot be found for it. 2175 SDValue PtrOff; 2176 2177 // Stack align in ELF 32 2178 if (isELF32_ABI && Align) 2179 PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize, 2180 StackPtr.getValueType()); 2181 else 2182 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 2183 2184 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff); 2185 2186 // On PPC64, promote integers to 64-bit values. 2187 if (isPPC64 && Arg.getValueType() == MVT::i32) { 2188 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 2189 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 2190 Arg = DAG.getNode(ExtOp, MVT::i64, Arg); 2191 } 2192 2193 // FIXME Elf untested, what are alignment rules? 2194 // FIXME memcpy is used way more than necessary. Correctness first. 2195 if (Flags.isByVal()) { 2196 unsigned Size = Flags.getByValSize(); 2197 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2); 2198 if (Size==1 || Size==2) { 2199 // Very small objects are passed right-justified. 2200 // Everything else is passed left-justified. 2201 MVT VT = (Size==1) ? 
MVT::i8 : MVT::i16; 2202 if (GPR_idx != NumGPRs) { 2203 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg, 2204 NULL, 0, VT); 2205 MemOpChains.push_back(Load.getValue(1)); 2206 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 2207 if (isMachoABI) 2208 ArgOffset += PtrByteSize; 2209 } else { 2210 SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); 2211 SDValue AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const); 2212 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, 2213 CallSeqStart.getNode()->getOperand(0), 2214 Flags, DAG, Size); 2215 // This must go outside the CALLSEQ_START..END. 2216 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 2217 CallSeqStart.getNode()->getOperand(1)); 2218 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 2219 NewCallSeqStart.getNode()); 2220 Chain = CallSeqStart = NewCallSeqStart; 2221 ArgOffset += PtrByteSize; 2222 } 2223 continue; 2224 } 2225 // Copy entire object into memory. There are cases where gcc-generated 2226 // code assumes it is there, even if it could be put entirely into 2227 // registers. (This is not what the doc says.) 2228 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 2229 CallSeqStart.getNode()->getOperand(0), 2230 Flags, DAG, Size); 2231 // This must go outside the CALLSEQ_START..END. 2232 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 2233 CallSeqStart.getNode()->getOperand(1)); 2234 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode()); 2235 Chain = CallSeqStart = NewCallSeqStart; 2236 // And copy the pieces of it that fit into registers. 2237 for (unsigned j=0; j<Size; j+=PtrByteSize) { 2238 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 2239 SDValue AddArg = DAG.getNode(ISD::ADD, PtrVT, Arg, Const); 2240 if (GPR_idx != NumGPRs) { 2241 SDValue Load = DAG.getLoad(PtrVT, Chain, AddArg, NULL, 0); 2242 MemOpChains.push_back(Load.getValue(1)); 2243 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 2244 if (isMachoABI) 2245 ArgOffset += PtrByteSize; 2246 } else { 2247 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 2248 break; 2249 } 2250 } 2251 continue; 2252 } 2253 2254 switch (Arg.getValueType().getSimpleVT()) { 2255 default: assert(0 && "Unexpected ValueType for argument!"); 2256 case MVT::i32: 2257 case MVT::i64: 2258 // Double word align in ELF 2259 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2); 2260 if (GPR_idx != NumGPRs) { 2261 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 2262 } else { 2263 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 2264 isPPC64, isTailCall, false, MemOpChains, 2265 TailCallArguments); 2266 inMem = true; 2267 } 2268 if (inMem || isMachoABI) { 2269 // Stack align in ELF 2270 if (isELF32_ABI && Align) 2271 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 2272 2273 ArgOffset += PtrByteSize; 2274 } 2275 break; 2276 case MVT::f32: 2277 case MVT::f64: 2278 if (FPR_idx != NumFPRs) { 2279 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 2280 2281 if (isVarArg) { 2282 SDValue Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 2283 MemOpChains.push_back(Store); 2284 2285 // Float varargs are always shadowed in available integer registers 2286 if (GPR_idx != NumGPRs) { 2287 SDValue Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 2288 MemOpChains.push_back(Load.getValue(1)); 2289 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], 2290 Load)); 2291 } 2292 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 
!isPPC64){ 2293 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 2294 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour); 2295 SDValue Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); 2296 MemOpChains.push_back(Load.getValue(1)); 2297 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], 2298 Load)); 2299 } 2300 } else { 2301 // If we have any FPRs remaining, we may also have GPRs remaining. 2302 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 2303 // GPRs. 2304 if (isMachoABI) { 2305 if (GPR_idx != NumGPRs) 2306 ++GPR_idx; 2307 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 2308 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 2309 ++GPR_idx; 2310 } 2311 } 2312 } else { 2313 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 2314 isPPC64, isTailCall, false, MemOpChains, 2315 TailCallArguments); 2316 inMem = true; 2317 } 2318 if (inMem || isMachoABI) { 2319 // Stack align in ELF 2320 if (isELF32_ABI && Align) 2321 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; 2322 if (isPPC64) 2323 ArgOffset += 8; 2324 else 2325 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 2326 } 2327 break; 2328 case MVT::v4f32: 2329 case MVT::v4i32: 2330 case MVT::v8i16: 2331 case MVT::v16i8: 2332 if (isVarArg) { 2333 // These go aligned on the stack, or in the corresponding R registers 2334 // when within range. The Darwin PPC ABI doc claims they also go in 2335 // V registers; in fact gcc does this only for arguments that are 2336 // prototyped, not for those that match the ... We do it for all 2337 // arguments, seems to work. 2338 while (ArgOffset % 16 !=0) { 2339 ArgOffset += PtrByteSize; 2340 if (GPR_idx != NumGPRs) 2341 GPR_idx++; 2342 } 2343 // We could elide this store in the case where the object fits 2344 // entirely in R registers. Maybe later. 2345 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, 2346 DAG.getConstant(ArgOffset, PtrVT)); 2347 SDValue Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); 2348 MemOpChains.push_back(Store); 2349 if (VR_idx != NumVRs) { 2350 SDValue Load = DAG.getLoad(MVT::v4f32, Store, PtrOff, NULL, 0); 2351 MemOpChains.push_back(Load.getValue(1)); 2352 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 2353 } 2354 ArgOffset += 16; 2355 for (unsigned i=0; i<16; i+=PtrByteSize) { 2356 if (GPR_idx == NumGPRs) 2357 break; 2358 SDValue Ix = DAG.getNode(ISD::ADD, PtrVT, PtrOff, 2359 DAG.getConstant(i, PtrVT)); 2360 SDValue Load = DAG.getLoad(PtrVT, Store, Ix, NULL, 0); 2361 MemOpChains.push_back(Load.getValue(1)); 2362 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 2363 } 2364 break; 2365 } 2366 2367 // Non-varargs Altivec params generally go in registers, but have 2368 // stack space allocated at the end. 2369 if (VR_idx != NumVRs) { 2370 // Doesn't have GPR space allocated. 2371 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 2372 } else if (nAltivecParamsAtEnd==0) { 2373 // We are emitting Altivec params in order. 2374 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 2375 isPPC64, isTailCall, true, MemOpChains, 2376 TailCallArguments); 2377 ArgOffset += 16; 2378 } 2379 break; 2380 } 2381 } 2382 // If all Altivec parameters fit in registers, as they usually do, 2383 // they get stack space following the non-Altivec parameters. We 2384 // don't track this here because nobody below needs it. 2385 // If there are more Altivec parameters than fit in registers emit 2386 // the stores here. 
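  // For example, with the 12 vector registers V2-V13 available, a 32-bit
  // non-varargs call passing 14 vector arguments keeps the first 12 in
  // registers and stores numbers 13 and 14 here, starting at the
  // 16-byte-aligned offset that follows the 12 reserved in-register slots.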
2387 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 2388 unsigned j = 0; 2389 // Offset is aligned; skip 1st 12 params which go in V registers. 2390 ArgOffset = ((ArgOffset+15)/16)*16; 2391 ArgOffset += 12*16; 2392 for (unsigned i = 0; i != NumOps; ++i) { 2393 SDValue Arg = Op.getOperand(5+2*i); 2394 MVT ArgType = Arg.getValueType(); 2395 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 2396 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 2397 if (++j > NumVRs) { 2398 SDValue PtrOff; 2399 // We are emitting Altivec params in order. 2400 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 2401 isPPC64, isTailCall, true, MemOpChains, 2402 TailCallArguments); 2403 ArgOffset += 16; 2404 } 2405 } 2406 } 2407 } 2408 2409 if (!MemOpChains.empty()) 2410 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 2411 &MemOpChains[0], MemOpChains.size()); 2412 2413 // Build a sequence of copy-to-reg nodes chained together with token chain 2414 // and flag operands which copy the outgoing args into the appropriate regs. 2415 SDValue InFlag; 2416 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2417 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 2418 InFlag); 2419 InFlag = Chain.getValue(1); 2420 } 2421 2422 // With the ELF 32 ABI, set CR6 to true if this is a vararg call. 2423 if (isVarArg && isELF32_ABI) { 2424 SDValue SetCR(DAG.getTargetNode(PPC::CRSET, MVT::i32), 0); 2425 Chain = DAG.getCopyToReg(Chain, PPC::CR1EQ, SetCR, InFlag); 2426 InFlag = Chain.getValue(1); 2427 } 2428 2429 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 2430 // might overwrite each other in case of tail call optimization. 2431 if (isTailCall) { 2432 SmallVector<SDValue, 8> MemOpChains2; 2433 // Do not flag preceeding copytoreg stuff together with the following stuff. 2434 InFlag = SDValue(); 2435 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 2436 MemOpChains2); 2437 if (!MemOpChains2.empty()) 2438 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 2439 &MemOpChains2[0], MemOpChains2.size()); 2440 2441 // Store the return address to the appropriate stack slot. 2442 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 2443 isPPC64, isMachoABI); 2444 } 2445 2446 // Emit callseq_end just before tailcall node. 2447 if (isTailCall) { 2448 SmallVector<SDValue, 8> CallSeqOps; 2449 SDVTList CallSeqNodeTys = DAG.getVTList(MVT::Other, MVT::Flag); 2450 CallSeqOps.push_back(Chain); 2451 CallSeqOps.push_back(DAG.getIntPtrConstant(NumBytes)); 2452 CallSeqOps.push_back(DAG.getIntPtrConstant(0)); 2453 if (InFlag.getNode()) 2454 CallSeqOps.push_back(InFlag); 2455 Chain = DAG.getNode(ISD::CALLSEQ_END, CallSeqNodeTys, &CallSeqOps[0], 2456 CallSeqOps.size()); 2457 InFlag = Chain.getValue(1); 2458 } 2459 2460 std::vector<MVT> NodeTys; 2461 NodeTys.push_back(MVT::Other); // Returns a chain 2462 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 2463 2464 SmallVector<SDValue, 8> Ops; 2465 unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF; 2466 2467 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 2468 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 2469 // node so that legalize doesn't hack it. 
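  // Four callee forms are handled below: a global becomes a
  // TargetGlobalAddress, an external symbol a TargetExternalSymbol, a
  // BLA-compatible constant is used as an absolute branch target, and
  // anything else is an indirect call through CTR (MTCTR + BCTRL, with the
  // callee address also copied into R12/X12 for the Macho ABI).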
2470 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2471 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType()); 2472 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 2473 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); 2474 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) 2475 // If this is an absolute destination address, use the munged value. 2476 Callee = SDValue(Dest, 0); 2477 else { 2478 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 2479 // to do the call, we can't use PPCISD::CALL. 2480 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 2481 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2482 2 + (InFlag.getNode() != 0)); 2483 InFlag = Chain.getValue(1); 2484 2485 // Copy the callee address into R12/X12 on darwin. 2486 if (isMachoABI) { 2487 unsigned Reg = Callee.getValueType() == MVT::i32 ? PPC::R12 : PPC::X12; 2488 Chain = DAG.getCopyToReg(Chain, Reg, Callee, InFlag); 2489 InFlag = Chain.getValue(1); 2490 } 2491 2492 NodeTys.clear(); 2493 NodeTys.push_back(MVT::Other); 2494 NodeTys.push_back(MVT::Flag); 2495 Ops.push_back(Chain); 2496 CallOpc = isMachoABI ? PPCISD::BCTRL_Macho : PPCISD::BCTRL_ELF; 2497 Callee.setNode(0); 2498 // Add CTR register as callee so a bctr can be emitted later. 2499 if (isTailCall) 2500 Ops.push_back(DAG.getRegister(PPC::CTR, getPointerTy())); 2501 } 2502 2503 // If this is a direct call, pass the chain and the callee. 2504 if (Callee.getNode()) { 2505 Ops.push_back(Chain); 2506 Ops.push_back(Callee); 2507 } 2508 // If this is a tail call add stack pointer delta. 2509 if (isTailCall) 2510 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 2511 2512 // Add argument registers to the end of the list so that they are known live 2513 // into the call. 2514 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2515 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2516 RegsToPass[i].second.getValueType())); 2517 2518 // When performing tail call optimization the callee pops its arguments off 2519 // the stack. Account for this here so these bytes can be pushed back on in 2520 // PPCRegisterInfo::eliminateCallFramePseudoInstr. 2521 int BytesCalleePops = 2522 (CC==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0; 2523 2524 if (InFlag.getNode()) 2525 Ops.push_back(InFlag); 2526 2527 // Emit tail call. 2528 if (isTailCall) { 2529 assert(InFlag.getNode() && 2530 "Flag must be set. Depend on flag being set in LowerRET"); 2531 Chain = DAG.getNode(PPCISD::TAILCALL, 2532 Op.getNode()->getVTList(), &Ops[0], Ops.size()); 2533 return SDValue(Chain.getNode(), Op.getResNo()); 2534 } 2535 2536 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size()); 2537 InFlag = Chain.getValue(1); 2538 2539 Chain = DAG.getCALLSEQ_END(Chain, 2540 DAG.getConstant(NumBytes, PtrVT), 2541 DAG.getConstant(BytesCalleePops, PtrVT), 2542 InFlag); 2543 if (Op.getNode()->getValueType(0) != MVT::Other) 2544 InFlag = Chain.getValue(1); 2545 2546 SmallVector<SDValue, 16> ResultVals; 2547 SmallVector<CCValAssign, 16> RVLocs; 2548 unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv(); 2549 CCState CCInfo(CallerCC, isVarArg, TM, RVLocs); 2550 CCInfo.AnalyzeCallResult(Op.getNode(), RetCC_PPC); 2551 2552 // Copy all of the result registers out of their specified physreg. 
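  // Each copy below consumes the flag produced by the previous node and
  // produces a new one (result #2), keeping the register reads glued to the
  // call.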
2553 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2554 CCValAssign &VA = RVLocs[i]; 2555 MVT VT = VA.getValVT(); 2556 assert(VA.isRegLoc() && "Can only return in registers!"); 2557 Chain = DAG.getCopyFromReg(Chain, VA.getLocReg(), VT, InFlag).getValue(1); 2558 ResultVals.push_back(Chain.getValue(0)); 2559 InFlag = Chain.getValue(2); 2560 } 2561 2562 // If the function returns void, just return the chain. 2563 if (RVLocs.empty()) 2564 return Chain; 2565 2566 // Otherwise, merge everything together with a MERGE_VALUES node. 2567 ResultVals.push_back(Chain); 2568 SDValue Res = DAG.getMergeValues(Op.getNode()->getVTList(), &ResultVals[0], 2569 ResultVals.size()); 2570 return Res.getValue(Op.getResNo()); 2571} 2572 2573SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG, 2574 TargetMachine &TM) { 2575 SmallVector<CCValAssign, 16> RVLocs; 2576 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); 2577 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); 2578 CCState CCInfo(CC, isVarArg, TM, RVLocs); 2579 CCInfo.AnalyzeReturn(Op.getNode(), RetCC_PPC); 2580 2581 // If this is the first return lowered for this function, add the regs to the 2582 // liveout set for the function. 2583 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 2584 for (unsigned i = 0; i != RVLocs.size(); ++i) 2585 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 2586 } 2587 2588 SDValue Chain = Op.getOperand(0); 2589 2590 Chain = GetPossiblePreceedingTailCall(Chain, PPCISD::TAILCALL); 2591 if (Chain.getOpcode() == PPCISD::TAILCALL) { 2592 SDValue TailCall = Chain; 2593 SDValue TargetAddress = TailCall.getOperand(1); 2594 SDValue StackAdjustment = TailCall.getOperand(2); 2595 2596 assert(((TargetAddress.getOpcode() == ISD::Register && 2597 cast<RegisterSDNode>(TargetAddress)->getReg() == PPC::CTR) || 2598 TargetAddress.getOpcode() == ISD::TargetExternalSymbol || 2599 TargetAddress.getOpcode() == ISD::TargetGlobalAddress || 2600 isa<ConstantSDNode>(TargetAddress)) && 2601 "Expecting an global address, external symbol, absolute value or register"); 2602 2603 assert(StackAdjustment.getOpcode() == ISD::Constant && 2604 "Expecting a const value"); 2605 2606 SmallVector<SDValue,8> Operands; 2607 Operands.push_back(Chain.getOperand(0)); 2608 Operands.push_back(TargetAddress); 2609 Operands.push_back(StackAdjustment); 2610 // Copy registers used by the call. Last operand is a flag so it is not 2611 // copied. 2612 for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) { 2613 Operands.push_back(Chain.getOperand(i)); 2614 } 2615 return DAG.getNode(PPCISD::TC_RETURN, MVT::Other, &Operands[0], 2616 Operands.size()); 2617 } 2618 2619 SDValue Flag; 2620 2621 // Copy the result values into the output registers. 2622 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2623 CCValAssign &VA = RVLocs[i]; 2624 assert(VA.isRegLoc() && "Can only return in registers!"); 2625 Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), Flag); 2626 Flag = Chain.getValue(1); 2627 } 2628 2629 if (Flag.getNode()) 2630 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain, Flag); 2631 else 2632 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain); 2633} 2634 2635SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 2636 const PPCSubtarget &Subtarget) { 2637 // When we pop the dynamic allocation we need to restore the SP link. 2638 2639 // Get the corect type for pointers. 
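  // The sequence below reloads the old SP back-link through the current stack
  // pointer, copies the saved SP back into R1/X1, and then re-stores the link
  // word so the chain of frames stays valid after the restore.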
2640 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2641 2642 // Construct the stack pointer operand. 2643 bool IsPPC64 = Subtarget.isPPC64(); 2644 unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1; 2645 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 2646 2647 // Get the operands for the STACKRESTORE. 2648 SDValue Chain = Op.getOperand(0); 2649 SDValue SaveSP = Op.getOperand(1); 2650 2651 // Load the old link SP. 2652 SDValue LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0); 2653 2654 // Restore the stack pointer. 2655 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP); 2656 2657 // Store the old link SP. 2658 return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0); 2659} 2660 2661 2662 2663SDValue 2664PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const { 2665 MachineFunction &MF = DAG.getMachineFunction(); 2666 bool IsPPC64 = PPCSubTarget.isPPC64(); 2667 bool isMachoABI = PPCSubTarget.isMachoABI(); 2668 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2669 2670 // Get current frame pointer save index. The users of this index will be 2671 // primarily DYNALLOC instructions. 2672 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2673 int RASI = FI->getReturnAddrSaveIndex(); 2674 2675 // If the frame pointer save index hasn't been defined yet. 2676 if (!RASI) { 2677 // Find out what the fix offset of the frame pointer save area. 2678 int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, isMachoABI); 2679 // Allocate the frame index for frame pointer save area. 2680 RASI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, LROffset); 2681 // Save the result. 2682 FI->setReturnAddrSaveIndex(RASI); 2683 } 2684 return DAG.getFrameIndex(RASI, PtrVT); 2685} 2686 2687SDValue 2688PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { 2689 MachineFunction &MF = DAG.getMachineFunction(); 2690 bool IsPPC64 = PPCSubTarget.isPPC64(); 2691 bool isMachoABI = PPCSubTarget.isMachoABI(); 2692 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2693 2694 // Get current frame pointer save index. The users of this index will be 2695 // primarily DYNALLOC instructions. 2696 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2697 int FPSI = FI->getFramePointerSaveIndex(); 2698 2699 // If the frame pointer save index hasn't been defined yet. 2700 if (!FPSI) { 2701 // Find out what the fix offset of the frame pointer save area. 2702 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, isMachoABI); 2703 2704 // Allocate the frame index for frame pointer save area. 2705 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset); 2706 // Save the result. 2707 FI->setFramePointerSaveIndex(FPSI); 2708 } 2709 return DAG.getFrameIndex(FPSI, PtrVT); 2710} 2711 2712SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 2713 SelectionDAG &DAG, 2714 const PPCSubtarget &Subtarget) { 2715 // Get the inputs. 2716 SDValue Chain = Op.getOperand(0); 2717 SDValue Size = Op.getOperand(1); 2718 2719 // Get the corect type for pointers. 2720 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2721 // Negate the size. 2722 SDValue NegSize = DAG.getNode(ISD::SUB, PtrVT, 2723 DAG.getConstant(0, PtrVT), Size); 2724 // Construct a node for the frame pointer save index. 2725 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 2726 // Build a DYNALLOC node. 
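  // DYNALLOC takes (Chain, NegSize, FPSIdx) and produces the address of the
  // newly allocated block plus an output chain; the frame-pointer save index
  // comes from getFramePointerFrameIndex above, whose users are primarily
  // DYNALLOC instructions.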
2727 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 2728 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 2729 return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3); 2730} 2731 2732/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 2733/// possible. 2734SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) { 2735 // Not FP? Not a fsel. 2736 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 2737 !Op.getOperand(2).getValueType().isFloatingPoint()) 2738 return SDValue(); 2739 2740 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2741 2742 // Cannot handle SETEQ/SETNE. 2743 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDValue(); 2744 2745 MVT ResVT = Op.getValueType(); 2746 MVT CmpVT = Op.getOperand(0).getValueType(); 2747 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 2748 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 2749 2750 // If the RHS of the comparison is a 0.0, we don't need to do the 2751 // subtraction at all. 2752 if (isFloatingPointZero(RHS)) 2753 switch (CC) { 2754 default: break; // SETUO etc aren't handled by fsel. 2755 case ISD::SETULT: 2756 case ISD::SETLT: 2757 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 2758 case ISD::SETOGE: 2759 case ISD::SETGE: 2760 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 2761 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 2762 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV); 2763 case ISD::SETUGT: 2764 case ISD::SETGT: 2765 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 2766 case ISD::SETOLE: 2767 case ISD::SETLE: 2768 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 2769 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS); 2770 return DAG.getNode(PPCISD::FSEL, ResVT, 2771 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV); 2772 } 2773 2774 SDValue Cmp; 2775 switch (CC) { 2776 default: break; // SETUO etc aren't handled by fsel. 2777 case ISD::SETULT: 2778 case ISD::SETLT: 2779 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 2780 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2781 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2782 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 2783 case ISD::SETOGE: 2784 case ISD::SETGE: 2785 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS); 2786 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2787 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2788 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 2789 case ISD::SETUGT: 2790 case ISD::SETGT: 2791 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 2792 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2793 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2794 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV); 2795 case ISD::SETOLE: 2796 case ISD::SETLE: 2797 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS); 2798 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 2799 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); 2800 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); 2801 } 2802 return SDValue(); 2803} 2804 2805// FIXME: Split this code up when LegalizeDAGTypes lands. 
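// FP_TO_SINT is lowered by converting in an FP register (fctiwz for i32,
// fctidz for i64), spilling the 8-byte result to a stack temporary, and
// reloading it as an integer; on this big-endian target the i32 result lives
// in the high-address half of the slot, hence the +4 bias below.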
2806SDValue PPCTargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { 2807 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 2808 SDValue Src = Op.getOperand(0); 2809 if (Src.getValueType() == MVT::f32) 2810 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); 2811 2812 SDValue Tmp; 2813 switch (Op.getValueType().getSimpleVT()) { 2814 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); 2815 case MVT::i32: 2816 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src); 2817 break; 2818 case MVT::i64: 2819 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src); 2820 break; 2821 } 2822 2823 // Convert the FP value to an int value through memory. 2824 SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64); 2825 2826 // Emit a store to the stack slot. 2827 SDValue Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0); 2828 2829 // Result is a load from the stack slot. If loading 4 bytes, make sure to 2830 // add in a bias. 2831 if (Op.getValueType() == MVT::i32) 2832 FIPtr = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr, 2833 DAG.getConstant(4, FIPtr.getValueType())); 2834 return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0); 2835} 2836 2837SDValue PPCTargetLowering::LowerFP_ROUND_INREG(SDValue Op, 2838 SelectionDAG &DAG) { 2839 assert(Op.getValueType() == MVT::ppcf128); 2840 SDNode *Node = Op.getNode(); 2841 assert(Node->getOperand(0).getValueType() == MVT::ppcf128); 2842 assert(Node->getOperand(0).getNode()->getOpcode() == ISD::BUILD_PAIR); 2843 SDValue Lo = Node->getOperand(0).getNode()->getOperand(0); 2844 SDValue Hi = Node->getOperand(0).getNode()->getOperand(1); 2845 2846 // This sequence changes FPSCR to do round-to-zero, adds the two halves 2847 // of the long double, and puts FPSCR back the way it was. We do not 2848 // actually model FPSCR. 2849 std::vector<MVT> NodeTys; 2850 SDValue Ops[4], Result, MFFSreg, InFlag, FPreg; 2851 2852 NodeTys.push_back(MVT::f64); // Return register 2853 NodeTys.push_back(MVT::Flag); // Returns a flag for later insns 2854 Result = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0); 2855 MFFSreg = Result.getValue(0); 2856 InFlag = Result.getValue(1); 2857 2858 NodeTys.clear(); 2859 NodeTys.push_back(MVT::Flag); // Returns a flag 2860 Ops[0] = DAG.getConstant(31, MVT::i32); 2861 Ops[1] = InFlag; 2862 Result = DAG.getNode(PPCISD::MTFSB1, NodeTys, Ops, 2); 2863 InFlag = Result.getValue(0); 2864 2865 NodeTys.clear(); 2866 NodeTys.push_back(MVT::Flag); // Returns a flag 2867 Ops[0] = DAG.getConstant(30, MVT::i32); 2868 Ops[1] = InFlag; 2869 Result = DAG.getNode(PPCISD::MTFSB0, NodeTys, Ops, 2); 2870 InFlag = Result.getValue(0); 2871 2872 NodeTys.clear(); 2873 NodeTys.push_back(MVT::f64); // result of add 2874 NodeTys.push_back(MVT::Flag); // Returns a flag 2875 Ops[0] = Lo; 2876 Ops[1] = Hi; 2877 Ops[2] = InFlag; 2878 Result = DAG.getNode(PPCISD::FADDRTZ, NodeTys, Ops, 3); 2879 FPreg = Result.getValue(0); 2880 InFlag = Result.getValue(1); 2881 2882 NodeTys.clear(); 2883 NodeTys.push_back(MVT::f64); 2884 Ops[0] = DAG.getConstant(1, MVT::i32); 2885 Ops[1] = MFFSreg; 2886 Ops[2] = FPreg; 2887 Ops[3] = InFlag; 2888 Result = DAG.getNode(PPCISD::MTFSF, NodeTys, Ops, 4); 2889 FPreg = Result.getValue(0); 2890 2891 // We know the low half is about to be thrown away, so just use something 2892 // convenient. 2893 return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg); 2894} 2895 2896SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2897 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 
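  // Two cases are handled below: an i64 source is bitcast to f64 and
  // converted directly with FCFID, while an i32 source is sign-extended to
  // 64 bits (EXTSW_32), stored with STD_32 through a stack slot, reloaded as
  // an f64 and then FCFID'd; either result is rounded to f32 if that is the
  // requested type.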
2898 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 2899 return SDValue(); 2900 2901 if (Op.getOperand(0).getValueType() == MVT::i64) { 2902 SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); 2903 SDValue FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); 2904 if (Op.getValueType() == MVT::f32) 2905 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0)); 2906 return FP; 2907 } 2908 2909 assert(Op.getOperand(0).getValueType() == MVT::i32 && 2910 "Unhandled SINT_TO_FP type in custom expander!"); 2911 // Since we only generate this in 64-bit mode, we can take advantage of 2912 // 64-bit registers. In particular, sign extend the input value into the 2913 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 2914 // then lfd it and fcfid it. 2915 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 2916 int FrameIdx = FrameInfo->CreateStackObject(8, 8); 2917 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2918 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 2919 2920 SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, 2921 Op.getOperand(0)); 2922 2923 // STD the extended value into the stack slot. 2924 MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx), 2925 MachineMemOperand::MOStore, 0, 8, 8); 2926 SDValue Store = DAG.getNode(PPCISD::STD_32, MVT::Other, 2927 DAG.getEntryNode(), Ext64, FIdx, 2928 DAG.getMemOperand(MO)); 2929 // Load the value as a double. 2930 SDValue Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0); 2931 2932 // FCFID it and return it. 2933 SDValue FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); 2934 if (Op.getValueType() == MVT::f32) 2935 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0)); 2936 return FP; 2937} 2938 2939SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) { 2940 /* 2941 The rounding mode is in bits 30:31 of FPSR, and has the following 2942 settings: 2943 00 Round to nearest 2944 01 Round to 0 2945 10 Round to +inf 2946 11 Round to -inf 2947 2948 FLT_ROUNDS, on the other hand, expects the following: 2949 -1 Undefined 2950 0 Round to 0 2951 1 Round to nearest 2952 2 Round to +inf 2953 3 Round to -inf 2954 2955 To perform the conversion, we do: 2956 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 2957 */ 2958 2959 MachineFunction &MF = DAG.getMachineFunction(); 2960 MVT VT = Op.getValueType(); 2961 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2962 std::vector<MVT> NodeTys; 2963 SDValue MFFSreg, InFlag; 2964 2965 // Save FP Control Word to register 2966 NodeTys.push_back(MVT::f64); // return register 2967 NodeTys.push_back(MVT::Flag); // unused in this context 2968 SDValue Chain = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0); 2969 2970 // Save FP register to stack slot 2971 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 2972 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 2973 SDValue Store = DAG.getStore(DAG.getEntryNode(), Chain, 2974 StackSlot, NULL, 0); 2975 2976 // Load FP Control Word from low 32 bits of stack slot. 
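  // mffs returns the FPSCR image in the low-order word of an f64, so after
  // the 8-byte store above the bits we care about (including the RN field)
  // live at offset 4 of the big-endian stack slot; hence the +4 below before
  // the i32 load.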
2977 SDValue Four = DAG.getConstant(4, PtrVT); 2978 SDValue Addr = DAG.getNode(ISD::ADD, PtrVT, StackSlot, Four); 2979 SDValue CWD = DAG.getLoad(MVT::i32, Store, Addr, NULL, 0); 2980 2981 // Transform as necessary 2982 SDValue CWD1 = 2983 DAG.getNode(ISD::AND, MVT::i32, 2984 CWD, DAG.getConstant(3, MVT::i32)); 2985 SDValue CWD2 = 2986 DAG.getNode(ISD::SRL, MVT::i32, 2987 DAG.getNode(ISD::AND, MVT::i32, 2988 DAG.getNode(ISD::XOR, MVT::i32, 2989 CWD, DAG.getConstant(3, MVT::i32)), 2990 DAG.getConstant(3, MVT::i32)), 2991 DAG.getConstant(1, MVT::i8)); 2992 2993 SDValue RetVal = 2994 DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2); 2995 2996 return DAG.getNode((VT.getSizeInBits() < 16 ? 2997 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); 2998} 2999 3000SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) { 3001 MVT VT = Op.getValueType(); 3002 unsigned BitWidth = VT.getSizeInBits(); 3003 assert(Op.getNumOperands() == 3 && 3004 VT == Op.getOperand(1).getValueType() && 3005 "Unexpected SHL!"); 3006 3007 // Expand into a bunch of logical ops. Note that these ops 3008 // depend on the PPC behavior for oversized shift amounts. 3009 SDValue Lo = Op.getOperand(0); 3010 SDValue Hi = Op.getOperand(1); 3011 SDValue Amt = Op.getOperand(2); 3012 MVT AmtVT = Amt.getValueType(); 3013 3014 SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 3015 DAG.getConstant(BitWidth, AmtVT), Amt); 3016 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, VT, Hi, Amt); 3017 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, VT, Lo, Tmp1); 3018 SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 3019 SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 3020 DAG.getConstant(-BitWidth, AmtVT)); 3021 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, VT, Lo, Tmp5); 3022 SDValue OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); 3023 SDValue OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt); 3024 SDValue OutOps[] = { OutLo, OutHi }; 3025 return DAG.getMergeValues(OutOps, 2); 3026} 3027 3028SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) { 3029 MVT VT = Op.getValueType(); 3030 unsigned BitWidth = VT.getSizeInBits(); 3031 assert(Op.getNumOperands() == 3 && 3032 VT == Op.getOperand(1).getValueType() && 3033 "Unexpected SRL!"); 3034 3035 // Expand into a bunch of logical ops. Note that these ops 3036 // depend on the PPC behavior for oversized shift amounts. 3037 SDValue Lo = Op.getOperand(0); 3038 SDValue Hi = Op.getOperand(1); 3039 SDValue Amt = Op.getOperand(2); 3040 MVT AmtVT = Amt.getValueType(); 3041 3042 SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 3043 DAG.getConstant(BitWidth, AmtVT), Amt); 3044 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); 3045 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); 3046 SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 3047 SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 3048 DAG.getConstant(-BitWidth, AmtVT)); 3049 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, VT, Hi, Tmp5); 3050 SDValue OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); 3051 SDValue OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt); 3052 SDValue OutOps[] = { OutLo, OutHi }; 3053 return DAG.getMergeValues(OutOps, 2); 3054} 3055 3056SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) { 3057 MVT VT = Op.getValueType(); 3058 unsigned BitWidth = VT.getSizeInBits(); 3059 assert(Op.getNumOperands() == 3 && 3060 VT == Op.getOperand(1).getValueType() && 3061 "Unexpected SRA!"); 3062 3063 // Expand into a bunch of logical ops, followed by a select_cc. 
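  // Roughly (sketch only), for a double-width arithmetic shift right by Amt:
  //   t1 = BitWidth - Amt           t4 = (Lo >>u Amt) | (Hi << t1)
  //   t5 = Amt - BitWidth           t6 = Hi >>s t5
  //   OutHi = Hi >>s Amt
  //   OutLo = (t5 <= 0) ? t4 : t6   // normal vs. oversized shift amount
  // relying on PPC's defined behavior for oversized shift amounts.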
3064 SDValue Lo = Op.getOperand(0); 3065 SDValue Hi = Op.getOperand(1); 3066 SDValue Amt = Op.getOperand(2); 3067 MVT AmtVT = Amt.getValueType(); 3068 3069 SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT, 3070 DAG.getConstant(BitWidth, AmtVT), Amt); 3071 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); 3072 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); 3073 SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); 3074 SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, 3075 DAG.getConstant(-BitWidth, AmtVT)); 3076 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, VT, Hi, Tmp5); 3077 SDValue OutHi = DAG.getNode(PPCISD::SRA, VT, Hi, Amt); 3078 SDValue OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT), 3079 Tmp4, Tmp6, ISD::SETLE); 3080 SDValue OutOps[] = { OutLo, OutHi }; 3081 return DAG.getMergeValues(OutOps, 2); 3082} 3083 3084//===----------------------------------------------------------------------===// 3085// Vector related lowering. 3086// 3087 3088// If this is a vector of constants or undefs, get the bits. A bit in 3089// UndefBits is set if the corresponding element of the vector is an 3090// ISD::UNDEF value. For undefs, the corresponding VectorBits values are 3091// zero. Return true if this is not an array of constants, false if it is. 3092// 3093static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], 3094 uint64_t UndefBits[2]) { 3095 // Start with zero'd results. 3096 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0; 3097 3098 unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits(); 3099 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { 3100 SDValue OpVal = BV->getOperand(i); 3101 3102 unsigned PartNo = i >= e/2; // In the upper 128 bits? 3103 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t. 3104 3105 uint64_t EltBits = 0; 3106 if (OpVal.getOpcode() == ISD::UNDEF) { 3107 uint64_t EltUndefBits = ~0U >> (32-EltBitSize); 3108 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize); 3109 continue; 3110 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 3111 EltBits = CN->getValue() & (~0U >> (32-EltBitSize)); 3112 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 3113 assert(CN->getValueType(0) == MVT::f32 && 3114 "Only one legal FP vector type!"); 3115 EltBits = FloatToBits(CN->getValueAPF().convertToFloat()); 3116 } else { 3117 // Nonconstant element. 3118 return true; 3119 } 3120 3121 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize); 3122 } 3123 3124 //printf("%llx %llx %llx %llx\n", 3125 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]); 3126 return false; 3127} 3128 3129// If this is a splat (repetition) of a value across the whole vector, return 3130// the smallest size that splats it. For example, "0x01010101010101..." is a 3131// splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 3132// SplatSize = 1 byte. 3133static bool isConstantSplat(const uint64_t Bits128[2], 3134 const uint64_t Undef128[2], 3135 unsigned &SplatBits, unsigned &SplatUndef, 3136 unsigned &SplatSize) { 3137 3138 // Don't let undefs prevent splats from matching. See if the top 64-bits are 3139 // the same as the lower 64-bits, ignoring undefs. 3140 if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0])) 3141 return false; // Can't be a splat if two pieces don't match. 
3142 3143 uint64_t Bits64 = Bits128[0] | Bits128[1]; 3144 uint64_t Undef64 = Undef128[0] & Undef128[1]; 3145 3146 // Check that the top 32-bits are the same as the lower 32-bits, ignoring 3147 // undefs. 3148 if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64)) 3149 return false; // Can't be a splat if two pieces don't match. 3150 3151 uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32); 3152 uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32); 3153 3154 // If the top 16-bits are different than the lower 16-bits, ignoring 3155 // undefs, we have an i32 splat. 3156 if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) { 3157 SplatBits = Bits32; 3158 SplatUndef = Undef32; 3159 SplatSize = 4; 3160 return true; 3161 } 3162 3163 uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16); 3164 uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16); 3165 3166 // If the top 8-bits are different than the lower 8-bits, ignoring 3167 // undefs, we have an i16 splat. 3168 if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) { 3169 SplatBits = Bits16; 3170 SplatUndef = Undef16; 3171 SplatSize = 2; 3172 return true; 3173 } 3174 3175 // Otherwise, we have an 8-bit splat. 3176 SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8); 3177 SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8); 3178 SplatSize = 1; 3179 return true; 3180} 3181 3182/// BuildSplatI - Build a canonical splati of Val with an element size of 3183/// SplatSize. Cast the result to VT. 3184static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT, 3185 SelectionDAG &DAG) { 3186 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 3187 3188 static const MVT VTys[] = { // canonical VT to use for each size. 3189 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 3190 }; 3191 3192 MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 3193 3194 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 3195 if (Val == -1) 3196 SplatSize = 1; 3197 3198 MVT CanonicalVT = VTys[SplatSize-1]; 3199 3200 // Build a canonical splat for this value. 3201 SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType()); 3202 SmallVector<SDValue, 8> Ops; 3203 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 3204 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, 3205 &Ops[0], Ops.size()); 3206 return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); 3207} 3208 3209/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 3210/// specified intrinsic ID. 3211static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 3212 SelectionDAG &DAG, 3213 MVT DestVT = MVT::Other) { 3214 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 3215 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 3216 DAG.getConstant(IID, MVT::i32), LHS, RHS); 3217} 3218 3219/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 3220/// specified intrinsic ID. 3221static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 3222 SDValue Op2, SelectionDAG &DAG, 3223 MVT DestVT = MVT::Other) { 3224 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 3225 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, 3226 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 3227} 3228 3229 3230/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 3231/// amount. The result has the specified value type. 
3232static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 3233 MVT VT, SelectionDAG &DAG) { 3234 // Force LHS/RHS to be the right type. 3235 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); 3236 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); 3237 3238 SDValue Ops[16]; 3239 for (unsigned i = 0; i != 16; ++i) 3240 Ops[i] = DAG.getConstant(i+Amt, MVT::i8); 3241 SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, 3242 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16)); 3243 return DAG.getNode(ISD::BIT_CONVERT, VT, T); 3244} 3245 3246// If this is a case we can't handle, return null and let the default 3247// expansion code take care of it. If we CAN select this case, and if it 3248// selects to a single instruction, return Op. Otherwise, if we can codegen 3249// this case more efficiently than a constant pool load, lower it to the 3250// sequence of ops that should be used. 3251SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 3252 SelectionDAG &DAG) { 3253 // If this is a vector of constants or undefs, get the bits. A bit in 3254 // UndefBits is set if the corresponding element of the vector is an 3255 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are 3256 // zero. 3257 uint64_t VectorBits[2]; 3258 uint64_t UndefBits[2]; 3259 if (GetConstantBuildVectorBits(Op.getNode(), VectorBits, UndefBits)) 3260 return SDValue(); // Not a constant vector. 3261 3262 // If this is a splat (repetition) of a value across the whole vector, return 3263 // the smallest size that splats it. For example, "0x01010101010101..." is a 3264 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and 3265 // SplatSize = 1 byte. 3266 unsigned SplatBits, SplatUndef, SplatSize; 3267 if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){ 3268 bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0; 3269 3270 // First, handle single instruction cases. 3271 3272 // All zeros? 3273 if (SplatBits == 0) { 3274 // Canonicalize all zero vectors to be v4i32. 3275 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 3276 SDValue Z = DAG.getConstant(0, MVT::i32); 3277 Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z); 3278 Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z); 3279 } 3280 return Op; 3281 } 3282 3283 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 3284 int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize); 3285 if (SextVal >= -16 && SextVal <= 15) 3286 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG); 3287 3288 3289 // Two instruction sequences. 3290 3291 // If this value is in the range [-32,30] and is even, use: 3292 // tmp = VSPLTI[bhw], result = add tmp, tmp 3293 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { 3294 SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG); 3295 Res = DAG.getNode(ISD::ADD, Res.getValueType(), Res, Res); 3296 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3297 } 3298 3299 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 3300 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 3301 // for fneg/fabs. 3302 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 3303 // Make -1 and vspltisw -1: 3304 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG); 3305 3306 // Make the VSLW intrinsic, computing 0x8000_0000. 
3307 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 3308 OnesV, DAG); 3309 3310 // xor by OnesV to invert it. 3311 Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV); 3312 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3313 } 3314 3315 // Check to see if this is a wide variety of vsplti*, binop self cases. 3316 unsigned SplatBitSize = SplatSize*8; 3317 static const signed char SplatCsts[] = { 3318 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 3319 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 3320 }; 3321 3322 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 3323 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 3324 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 3325 int i = SplatCsts[idx]; 3326 3327 // Figure out what shift amount will be used by altivec if shifted by i in 3328 // this splat size. 3329 unsigned TypeShiftAmt = i & (SplatBitSize-1); 3330 3331 // vsplti + shl self. 3332 if (SextVal == (i << (int)TypeShiftAmt)) { 3333 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 3334 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3335 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 3336 Intrinsic::ppc_altivec_vslw 3337 }; 3338 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 3339 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3340 } 3341 3342 // vsplti + srl self. 3343 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 3344 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 3345 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3346 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 3347 Intrinsic::ppc_altivec_vsrw 3348 }; 3349 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 3350 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3351 } 3352 3353 // vsplti + sra self. 3354 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 3355 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 3356 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3357 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 3358 Intrinsic::ppc_altivec_vsraw 3359 }; 3360 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 3361 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3362 } 3363 3364 // vsplti + rol self. 3365 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 3366 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 3367 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); 3368 static const unsigned IIDs[] = { // Intrinsic to use for each size. 
3369 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 3370 Intrinsic::ppc_altivec_vrlw 3371 }; 3372 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG); 3373 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); 3374 } 3375 3376 // t = vsplti c, result = vsldoi t, t, 1 3377 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) { 3378 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 3379 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG); 3380 } 3381 // t = vsplti c, result = vsldoi t, t, 2 3382 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) { 3383 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 3384 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG); 3385 } 3386 // t = vsplti c, result = vsldoi t, t, 3 3387 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) { 3388 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); 3389 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG); 3390 } 3391 } 3392 3393 // Three instruction sequences. 3394 3395 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 3396 if (SextVal >= 0 && SextVal <= 31) { 3397 SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG); 3398 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); 3399 LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS); 3400 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); 3401 } 3402 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 3403 if (SextVal >= -31 && SextVal <= 0) { 3404 SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG); 3405 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); 3406 LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS); 3407 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); 3408 } 3409 } 3410 3411 return SDValue(); 3412} 3413 3414/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3415/// the specified operations to build the shuffle. 
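/// Each PFEntry packs a cost in bits [31:30], an opcode in bits [29:26], and
/// two 13-bit indices into PerfectShuffleTable whose entries are expanded
/// recursively to form the operands; an OP_COPY leaf simply returns LHS or
/// RHS.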
3416static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3417 SDValue RHS, SelectionDAG &DAG) { 3418 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3419 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3420 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3421 3422 enum { 3423 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3424 OP_VMRGHW, 3425 OP_VMRGLW, 3426 OP_VSPLTISW0, 3427 OP_VSPLTISW1, 3428 OP_VSPLTISW2, 3429 OP_VSPLTISW3, 3430 OP_VSLDOI4, 3431 OP_VSLDOI8, 3432 OP_VSLDOI12 3433 }; 3434 3435 if (OpNum == OP_COPY) { 3436 if (LHSID == (1*9+2)*9+3) return LHS; 3437 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3438 return RHS; 3439 } 3440 3441 SDValue OpLHS, OpRHS; 3442 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG); 3443 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG); 3444 3445 unsigned ShufIdxs[16]; 3446 switch (OpNum) { 3447 default: assert(0 && "Unknown i32 permute!"); 3448 case OP_VMRGHW: 3449 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 3450 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 3451 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 3452 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 3453 break; 3454 case OP_VMRGLW: 3455 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 3456 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 3457 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 3458 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 3459 break; 3460 case OP_VSPLTISW0: 3461 for (unsigned i = 0; i != 16; ++i) 3462 ShufIdxs[i] = (i&3)+0; 3463 break; 3464 case OP_VSPLTISW1: 3465 for (unsigned i = 0; i != 16; ++i) 3466 ShufIdxs[i] = (i&3)+4; 3467 break; 3468 case OP_VSPLTISW2: 3469 for (unsigned i = 0; i != 16; ++i) 3470 ShufIdxs[i] = (i&3)+8; 3471 break; 3472 case OP_VSPLTISW3: 3473 for (unsigned i = 0; i != 16; ++i) 3474 ShufIdxs[i] = (i&3)+12; 3475 break; 3476 case OP_VSLDOI4: 3477 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG); 3478 case OP_VSLDOI8: 3479 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG); 3480 case OP_VSLDOI12: 3481 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG); 3482 } 3483 SDValue Ops[16]; 3484 for (unsigned i = 0; i != 16; ++i) 3485 Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i8); 3486 3487 return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS, 3488 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 3489} 3490 3491/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 3492/// is a shuffle we can handle in a single instruction, return it. Otherwise, 3493/// return the code it can be lowered into. Worst case, it can always be 3494/// lowered into a vperm. 3495SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 3496 SelectionDAG &DAG) { 3497 SDValue V1 = Op.getOperand(0); 3498 SDValue V2 = Op.getOperand(1); 3499 SDValue PermMask = Op.getOperand(2); 3500 3501 // Cases that are handled by instructions that take permute immediates 3502 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 3503 // selected by the instruction selector. 
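  // (With an undef V2, the splat forms and the unary variants of vpku[wh]um,
  //  vsldoi and vmrg[lh] are tried first; the same checks for two real inputs
  //  follow just below.)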
3504 if (V2.getOpcode() == ISD::UNDEF) { 3505 if (PPC::isSplatShuffleMask(PermMask.getNode(), 1) || 3506 PPC::isSplatShuffleMask(PermMask.getNode(), 2) || 3507 PPC::isSplatShuffleMask(PermMask.getNode(), 4) || 3508 PPC::isVPKUWUMShuffleMask(PermMask.getNode(), true) || 3509 PPC::isVPKUHUMShuffleMask(PermMask.getNode(), true) || 3510 PPC::isVSLDOIShuffleMask(PermMask.getNode(), true) != -1 || 3511 PPC::isVMRGLShuffleMask(PermMask.getNode(), 1, true) || 3512 PPC::isVMRGLShuffleMask(PermMask.getNode(), 2, true) || 3513 PPC::isVMRGLShuffleMask(PermMask.getNode(), 4, true) || 3514 PPC::isVMRGHShuffleMask(PermMask.getNode(), 1, true) || 3515 PPC::isVMRGHShuffleMask(PermMask.getNode(), 2, true) || 3516 PPC::isVMRGHShuffleMask(PermMask.getNode(), 4, true)) { 3517 return Op; 3518 } 3519 } 3520 3521 // Altivec has a variety of "shuffle immediates" that take two vector inputs 3522 // and produce a fixed permutation. If any of these match, do not lower to 3523 // VPERM. 3524 if (PPC::isVPKUWUMShuffleMask(PermMask.getNode(), false) || 3525 PPC::isVPKUHUMShuffleMask(PermMask.getNode(), false) || 3526 PPC::isVSLDOIShuffleMask(PermMask.getNode(), false) != -1 || 3527 PPC::isVMRGLShuffleMask(PermMask.getNode(), 1, false) || 3528 PPC::isVMRGLShuffleMask(PermMask.getNode(), 2, false) || 3529 PPC::isVMRGLShuffleMask(PermMask.getNode(), 4, false) || 3530 PPC::isVMRGHShuffleMask(PermMask.getNode(), 1, false) || 3531 PPC::isVMRGHShuffleMask(PermMask.getNode(), 2, false) || 3532 PPC::isVMRGHShuffleMask(PermMask.getNode(), 4, false)) 3533 return Op; 3534 3535 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 3536 // perfect shuffle table to emit an optimal matching sequence. 3537 unsigned PFIndexes[4]; 3538 bool isFourElementShuffle = true; 3539 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 3540 unsigned EltNo = 8; // Start out undef. 3541 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 3542 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF) 3543 continue; // Undef, ignore it. 3544 3545 unsigned ByteSource = 3546 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue(); 3547 if ((ByteSource & 3) != j) { 3548 isFourElementShuffle = false; 3549 break; 3550 } 3551 3552 if (EltNo == 8) { 3553 EltNo = ByteSource/4; 3554 } else if (EltNo != ByteSource/4) { 3555 isFourElementShuffle = false; 3556 break; 3557 } 3558 } 3559 PFIndexes[i] = EltNo; 3560 } 3561 3562 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 3563 // perfect shuffle vector to determine if it is cost effective to do this as 3564 // discrete instructions, or whether we should use a vperm. 3565 if (isFourElementShuffle) { 3566 // Compute the index in the perfect shuffle table. 3567 unsigned PFTableIndex = 3568 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3569 3570 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3571 unsigned Cost = (PFEntry >> 30); 3572 3573 // Determining when to avoid vperm is tricky. Many things affect the cost 3574 // of vperm, particularly how many times the perm mask needs to be computed. 3575 // For example, if the perm mask can be hoisted out of a loop or is already 3576 // used (perhaps because there are multiple permutes with the same shuffle 3577 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 3578 // the loop requires an extra register. 3579 // 3580 // As a compromise, we only emit discrete instructions if the shuffle can be 3581 // generated in 3 or fewer operations. 
When we have loop information 3582 // available, if this block is within a loop, we should avoid using vperm 3583 // for 3-operation perms and use a constant pool load instead. 3584 if (Cost < 3) 3585 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG); 3586 } 3587 3588 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 3589 // vector that will get spilled to the constant pool. 3590 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 3591 3592 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 3593 // that it is in input element units, not in bytes. Convert now. 3594 MVT EltVT = V1.getValueType().getVectorElementType(); 3595 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 3596 3597 SmallVector<SDValue, 16> ResultMask; 3598 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { 3599 unsigned SrcElt; 3600 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) 3601 SrcElt = 0; 3602 else 3603 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue(); 3604 3605 for (unsigned j = 0; j != BytesPerElement; ++j) 3606 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 3607 MVT::i8)); 3608 } 3609 3610 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, 3611 &ResultMask[0], ResultMask.size()); 3612 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); 3613} 3614 3615/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 3616/// altivec comparison. If it is, return true and fill in Opc/isDot with 3617/// information about the intrinsic. 3618static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 3619 bool &isDot) { 3620 unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue(); 3621 CompareOpc = -1; 3622 isDot = false; 3623 switch (IntrinsicID) { 3624 default: return false; 3625 // Comparison predicates. 3626 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 3627 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 3628 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 3629 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 3630 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 3631 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 3632 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 3633 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 3634 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 3635 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 3636 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 3637 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 3638 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 3639 3640 // Normal Comparisons. 
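  // Same opcode numbers as the predicate forms above, differing only in
  // isDot.  (The values appear to be the AltiVec instructions' extended
  // opcode fields; the dot forms additionally set CR6, which the lowering
  // below reads back with MFCR.)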
3641 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 3642 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 3643 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 3644 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 3645 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 3646 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 3647 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 3648 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 3649 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 3650 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 3651 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 3652 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 3653 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 3654 } 3655 return true; 3656} 3657 3658/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 3659/// lower, do it, otherwise return null. 3660SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 3661 SelectionDAG &DAG) { 3662 // If this is a lowered altivec predicate compare, CompareOpc is set to the 3663 // opcode number of the comparison. 3664 int CompareOpc; 3665 bool isDot; 3666 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 3667 return SDValue(); // Don't custom lower most intrinsics. 3668 3669 // If this is a non-dot comparison, make the VCMP node and we are done. 3670 if (!isDot) { 3671 SDValue Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), 3672 Op.getOperand(1), Op.getOperand(2), 3673 DAG.getConstant(CompareOpc, MVT::i32)); 3674 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp); 3675 } 3676 3677 // Create the PPCISD altivec 'dot' comparison node. 3678 SDValue Ops[] = { 3679 Op.getOperand(2), // LHS 3680 Op.getOperand(3), // RHS 3681 DAG.getConstant(CompareOpc, MVT::i32) 3682 }; 3683 std::vector<MVT> VTs; 3684 VTs.push_back(Op.getOperand(2).getValueType()); 3685 VTs.push_back(MVT::Flag); 3686 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); 3687 3688 // Now that we have the comparison, emit a copy from the CR to a GPR. 3689 // This is flagged to the above dot comparison. 3690 SDValue Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, 3691 DAG.getRegister(PPC::CR6, MVT::i32), 3692 CompNode.getValue(1)); 3693 3694 // Unpack the result based on how the target uses it. 3695 unsigned BitNo; // Bit # of CR6. 3696 bool InvertBit; // Invert result? 3697 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) { 3698 default: // Can't happen, don't crash on invalid number though. 3699 case 0: // Return the value of the EQ bit of CR6. 3700 BitNo = 0; InvertBit = false; 3701 break; 3702 case 1: // Return the inverted value of the EQ bit of CR6. 3703 BitNo = 0; InvertBit = true; 3704 break; 3705 case 2: // Return the value of the LT bit of CR6. 3706 BitNo = 2; InvertBit = false; 3707 break; 3708 case 3: // Return the inverted value of the LT bit of CR6. 3709 BitNo = 2; InvertBit = true; 3710 break; 3711 } 3712 3713 // Shift the bit into the low position. 3714 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags, 3715 DAG.getConstant(8-(3-BitNo), MVT::i32)); 3716 // Isolate the bit. 3717 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags, 3718 DAG.getConstant(1, MVT::i32)); 3719 3720 // If we are supposed to, toggle the bit. 
3721 if (InvertBit) 3722 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags, 3723 DAG.getConstant(1, MVT::i32)); 3724 return Flags; 3725} 3726 3727SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 3728 SelectionDAG &DAG) { 3729 // Create a stack slot that is 16-byte aligned. 3730 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 3731 int FrameIdx = FrameInfo->CreateStackObject(16, 16); 3732 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3733 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 3734 3735 // Store the input value into Value#0 of the stack slot. 3736 SDValue Store = DAG.getStore(DAG.getEntryNode(), 3737 Op.getOperand(0), FIdx, NULL, 0); 3738 // Load it out. 3739 return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0); 3740} 3741 3742SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) { 3743 if (Op.getValueType() == MVT::v4i32) { 3744 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 3745 3746 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG); 3747 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt. 3748 3749 SDValue RHSSwap = // = vrlw RHS, 16 3750 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG); 3751 3752 // Shrinkify inputs to v8i16. 3753 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS); 3754 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS); 3755 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap); 3756 3757 // Low parts multiplied together, generating 32-bit results (we ignore the 3758 // top parts). 3759 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 3760 LHS, RHS, DAG, MVT::v4i32); 3761 3762 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 3763 LHS, RHSSwap, Zero, DAG, MVT::v4i32); 3764 // Shift the high parts up 16 bits. 3765 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG); 3766 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd); 3767 } else if (Op.getValueType() == MVT::v8i16) { 3768 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 3769 3770 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG); 3771 3772 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 3773 LHS, RHS, Zero, DAG); 3774 } else if (Op.getValueType() == MVT::v16i8) { 3775 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 3776 3777 // Multiply the even 8-bit parts, producing 16-bit sums. 3778 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 3779 LHS, RHS, DAG, MVT::v8i16); 3780 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts); 3781 3782 // Multiply the odd 8-bit parts, producing 16-bit sums. 3783 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 3784 LHS, RHS, DAG, MVT::v8i16); 3785 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts); 3786 3787 // Merge the results together. 3788 SDValue Ops[16]; 3789 for (unsigned i = 0; i != 8; ++i) { 3790 Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8); 3791 Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8); 3792 } 3793 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts, 3794 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16)); 3795 } else { 3796 assert(0 && "Unknown mul to lower!"); 3797 abort(); 3798 } 3799} 3800 3801/// LowerOperation - Provide custom lowering hooks for some operations. 
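/// This should only be reached for opcodes marked Custom in the constructor;
/// anything else trips the assert in the default case below.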
3802/// 3803SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { 3804 switch (Op.getOpcode()) { 3805 default: assert(0 && "Wasn't expecting to be able to lower this!"); 3806 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 3807 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 3808 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 3809 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 3810 case ISD::SETCC: return LowerSETCC(Op, DAG); 3811 case ISD::VASTART: 3812 return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, 3813 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); 3814 3815 case ISD::VAARG: 3816 return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, 3817 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); 3818 3819 case ISD::FORMAL_ARGUMENTS: 3820 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex, 3821 VarArgsStackOffset, VarArgsNumGPR, 3822 VarArgsNumFPR, PPCSubTarget); 3823 3824 case ISD::CALL: return LowerCALL(Op, DAG, PPCSubTarget, 3825 getTargetMachine()); 3826 case ISD::RET: return LowerRET(Op, DAG, getTargetMachine()); 3827 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 3828 case ISD::DYNAMIC_STACKALLOC: 3829 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 3830 3831 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 3832 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 3833 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 3834 case ISD::FP_ROUND_INREG: return LowerFP_ROUND_INREG(Op, DAG); 3835 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 3836 3837 // Lower 64-bit shifts. 3838 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 3839 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 3840 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 3841 3842 // Vector-related lowering. 3843 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 3844 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 3845 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 3846 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 3847 case ISD::MUL: return LowerMUL(Op, DAG); 3848 3849 // Frame & Return address. 3850 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 3851 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 3852 } 3853 return SDValue(); 3854} 3855 3856SDNode *PPCTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) { 3857 switch (N->getOpcode()) { 3858 default: assert(0 && "Wasn't expecting to be able to lower this!"); 3859 case ISD::FP_TO_SINT: { 3860 SDValue Res = LowerFP_TO_SINT(SDValue(N, 0), DAG); 3861 // Use MERGE_VALUES to drop the chain result value and get a node with one 3862 // result. This requires turning off getMergeValues simplification, since 3863 // otherwise it will give us Res back. 3864 return DAG.getMergeValues(&Res, 1, false).getNode(); 3865 } 3866 } 3867} 3868 3869 3870//===----------------------------------------------------------------------===// 3871// Other Lowering Code 3872//===----------------------------------------------------------------------===// 3873 3874MachineBasicBlock * 3875PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 3876 bool is64bit, unsigned BinOpcode) { 3877 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 
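  // (When BinOpcode is 0 there is nothing to compute, so the incoming value
  //  register is fed straight to the st[wd]cx. instead of allocating a
  //  temporary; see the TmpReg selection below.)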
3878 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 3879 3880 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 3881 MachineFunction *F = BB->getParent(); 3882 MachineFunction::iterator It = BB; 3883 ++It; 3884 3885 unsigned dest = MI->getOperand(0).getReg(); 3886 unsigned ptrA = MI->getOperand(1).getReg(); 3887 unsigned ptrB = MI->getOperand(2).getReg(); 3888 unsigned incr = MI->getOperand(3).getReg(); 3889 3890 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 3891 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 3892 F->insert(It, loopMBB); 3893 F->insert(It, exitMBB); 3894 exitMBB->transferSuccessors(BB); 3895 3896 MachineRegisterInfo &RegInfo = F->getRegInfo(); 3897 unsigned TmpReg = (!BinOpcode) ? incr : 3898 RegInfo.createVirtualRegister( 3899 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 3900 (const TargetRegisterClass *) &PPC::GPRCRegClass); 3901 3902 // thisMBB: 3903 // ... 3904 // fallthrough --> loopMBB 3905 BB->addSuccessor(loopMBB); 3906 3907 // loopMBB: 3908 // l[wd]arx dest, ptr 3909 // add r0, dest, incr 3910 // st[wd]cx. r0, ptr 3911 // bne- loopMBB 3912 // fallthrough --> exitMBB 3913 BB = loopMBB; 3914 BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 3915 .addReg(ptrA).addReg(ptrB); 3916 if (BinOpcode) 3917 BuildMI(BB, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 3918 BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 3919 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 3920 BuildMI(BB, TII->get(PPC::BCC)) 3921 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 3922 BB->addSuccessor(loopMBB); 3923 BB->addSuccessor(exitMBB); 3924 3925 // exitMBB: 3926 // ... 3927 BB = exitMBB; 3928 return BB; 3929} 3930 3931MachineBasicBlock * 3932PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 3933 MachineBasicBlock *BB, 3934 bool is8bit, // operation 3935 unsigned BinOpcode) { 3936 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 3937 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 3938 // In 64 bit mode we have to use 64 bits for addresses, even though the 3939 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 3940 // registers without caring whether they're 32 or 64, but here we're 3941 // doing actual arithmetic on the addresses. 3942 bool is64bit = PPCSubTarget.isPPC64(); 3943 3944 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 3945 MachineFunction *F = BB->getParent(); 3946 MachineFunction::iterator It = BB; 3947 ++It; 3948 3949 unsigned dest = MI->getOperand(0).getReg(); 3950 unsigned ptrA = MI->getOperand(1).getReg(); 3951 unsigned ptrB = MI->getOperand(2).getReg(); 3952 unsigned incr = MI->getOperand(3).getReg(); 3953 3954 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 3955 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 3956 F->insert(It, loopMBB); 3957 F->insert(It, exitMBB); 3958 exitMBB->transferSuccessors(BB); 3959 3960 MachineRegisterInfo &RegInfo = F->getRegInfo(); 3961 const TargetRegisterClass *RC = 3962 is64bit ? 
(const TargetRegisterClass *) &PPC::G8RCRegClass : 3963 (const TargetRegisterClass *) &PPC::GPRCRegClass; 3964 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 3965 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 3966 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 3967 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 3968 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 3969 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 3970 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 3971 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 3972 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 3973 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 3974 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 3975 unsigned Ptr1Reg; 3976 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 3977 3978 // thisMBB: 3979 // ... 3980 // fallthrough --> loopMBB 3981 BB->addSuccessor(loopMBB); 3982 3983 // The 4-byte load must be aligned, while a char or short may be 3984 // anywhere in the word. Hence all this nasty bookkeeping code. 3985 // add ptr1, ptrA, ptrB [copy if ptrA==0] 3986 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 3987 // xori shift, shift1, 24 [16] 3988 // rlwinm ptr, ptr1, 0, 0, 29 3989 // slw incr2, incr, shift 3990 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 3991 // slw mask, mask2, shift 3992 // loopMBB: 3993 // lwarx tmpDest, ptr 3994 // add tmp, tmpDest, incr2 3995 // andc tmp2, tmpDest, mask 3996 // and tmp3, tmp, mask 3997 // or tmp4, tmp3, tmp2 3998 // stwcx. tmp4, ptr 3999 // bne- loopMBB 4000 // fallthrough --> exitMBB 4001 // srw dest, tmpDest, shift 4002 4003 if (ptrA!=PPC::R0) { 4004 Ptr1Reg = RegInfo.createVirtualRegister(RC); 4005 BuildMI(BB, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 4006 .addReg(ptrA).addReg(ptrB); 4007 } else { 4008 Ptr1Reg = ptrB; 4009 } 4010 BuildMI(BB, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 4011 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 4012 BuildMI(BB, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 4013 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 4014 if (is64bit) 4015 BuildMI(BB, TII->get(PPC::RLDICR), PtrReg) 4016 .addReg(Ptr1Reg).addImm(0).addImm(61); 4017 else 4018 BuildMI(BB, TII->get(PPC::RLWINM), PtrReg) 4019 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 4020 BuildMI(BB, TII->get(PPC::SLW), Incr2Reg) 4021 .addReg(incr).addReg(ShiftReg); 4022 if (is8bit) 4023 BuildMI(BB, TII->get(PPC::LI), Mask2Reg).addImm(255); 4024 else { 4025 BuildMI(BB, TII->get(PPC::LI), Mask3Reg).addImm(0); 4026 BuildMI(BB, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535); 4027 } 4028 BuildMI(BB, TII->get(PPC::SLW), MaskReg) 4029 .addReg(Mask2Reg).addReg(ShiftReg); 4030 4031 BB = loopMBB; 4032 BuildMI(BB, TII->get(PPC::LWARX), TmpDestReg) 4033 .addReg(PPC::R0).addReg(PtrReg); 4034 if (BinOpcode) 4035 BuildMI(BB, TII->get(BinOpcode), TmpReg) 4036 .addReg(Incr2Reg).addReg(TmpDestReg); 4037 BuildMI(BB, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 4038 .addReg(TmpDestReg).addReg(MaskReg); 4039 BuildMI(BB, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 4040 .addReg(TmpReg).addReg(MaskReg); 4041 BuildMI(BB, TII->get(is64bit ? 
PPC::OR8 : PPC::OR), Tmp4Reg) 4042 .addReg(Tmp3Reg).addReg(Tmp2Reg); 4043 BuildMI(BB, TII->get(PPC::STWCX)) 4044 .addReg(Tmp4Reg).addReg(PPC::R0).addReg(PtrReg); 4045 BuildMI(BB, TII->get(PPC::BCC)) 4046 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 4047 BB->addSuccessor(loopMBB); 4048 BB->addSuccessor(exitMBB); 4049 4050 // exitMBB: 4051 // ... 4052 BB = exitMBB; 4053 BuildMI(BB, TII->get(PPC::SRW), dest).addReg(TmpDestReg).addReg(ShiftReg); 4054 return BB; 4055} 4056 4057MachineBasicBlock * 4058PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4059 MachineBasicBlock *BB) { 4060 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4061 4062 // To "insert" these instructions we actually have to insert their 4063 // control-flow patterns. 4064 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4065 MachineFunction::iterator It = BB; 4066 ++It; 4067 4068 MachineFunction *F = BB->getParent(); 4069 4070 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 4071 MI->getOpcode() == PPC::SELECT_CC_I8 || 4072 MI->getOpcode() == PPC::SELECT_CC_F4 || 4073 MI->getOpcode() == PPC::SELECT_CC_F8 || 4074 MI->getOpcode() == PPC::SELECT_CC_VRRC) { 4075 4076 // The incoming instruction knows the destination vreg to set, the 4077 // condition code register to branch on, the true/false values to 4078 // select between, and a branch opcode to use. 4079 4080 // thisMBB: 4081 // ... 4082 // TrueVal = ... 4083 // cmpTY ccX, r1, r2 4084 // bCC copy1MBB 4085 // fallthrough --> copy0MBB 4086 MachineBasicBlock *thisMBB = BB; 4087 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4088 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4089 unsigned SelectPred = MI->getOperand(4).getImm(); 4090 BuildMI(BB, TII->get(PPC::BCC)) 4091 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 4092 F->insert(It, copy0MBB); 4093 F->insert(It, sinkMBB); 4094 // Update machine-CFG edges by transferring all successors of the current 4095 // block to the new block which will contain the Phi node for the select. 4096 sinkMBB->transferSuccessors(BB); 4097 // Next, add the true and fallthrough blocks as its successors. 4098 BB->addSuccessor(copy0MBB); 4099 BB->addSuccessor(sinkMBB); 4100 4101 // copy0MBB: 4102 // %FalseValue = ... 4103 // # fallthrough to sinkMBB 4104 BB = copy0MBB; 4105 4106 // Update machine-CFG edges 4107 BB->addSuccessor(sinkMBB); 4108 4109 // sinkMBB: 4110 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4111 // ... 
4112 BB = sinkMBB; 4113 BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg()) 4114 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 4115 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4116 } 4117 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 4118 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 4119 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 4120 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 4121 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 4122 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 4123 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 4124 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 4125 4126 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 4127 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 4128 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 4129 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 4130 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 4131 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 4132 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 4133 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 4134 4135 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 4136 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 4137 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 4138 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 4139 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 4140 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 4141 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 4142 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 4143 4144 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 4145 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 4146 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 4147 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 4148 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 4149 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 4150 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 4151 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 4152 4153 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 4154 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 4155 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 4156 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 4157 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 4158 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC); 4159 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 4160 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 4161 4162 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 4163 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 4164 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 4165 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 4166 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 4167 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 4168 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 4169 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 4170 4171 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 4172 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 4173 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 4174 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 4175 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 4176 BB = EmitAtomicBinary(MI, BB, false, 0); 4177 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 4178 BB = EmitAtomicBinary(MI, BB, true, 0); 4179 4180 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 4181 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 
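    // Note: the mismatch path (midMBB below) still issues a st[wd]cx. of the
    // value it just loaded; whether that store succeeds is irrelevant, it is
    // there (presumably) only to release the reservation taken by l[wd]arx.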
4182 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 4183 4184 unsigned dest = MI->getOperand(0).getReg(); 4185 unsigned ptrA = MI->getOperand(1).getReg(); 4186 unsigned ptrB = MI->getOperand(2).getReg(); 4187 unsigned oldval = MI->getOperand(3).getReg(); 4188 unsigned newval = MI->getOperand(4).getReg(); 4189 4190 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 4191 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 4192 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 4193 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4194 F->insert(It, loop1MBB); 4195 F->insert(It, loop2MBB); 4196 F->insert(It, midMBB); 4197 F->insert(It, exitMBB); 4198 exitMBB->transferSuccessors(BB); 4199 4200 // thisMBB: 4201 // ... 4202 // fallthrough --> loopMBB 4203 BB->addSuccessor(loop1MBB); 4204 4205 // loop1MBB: 4206 // l[wd]arx dest, ptr 4207 // cmp[wd] dest, oldval 4208 // bne- midMBB 4209 // loop2MBB: 4210 // st[wd]cx. newval, ptr 4211 // bne- loopMBB 4212 // b exitBB 4213 // midMBB: 4214 // st[wd]cx. dest, ptr 4215 // exitBB: 4216 BB = loop1MBB; 4217 BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 4218 .addReg(ptrA).addReg(ptrB); 4219 BuildMI(BB, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 4220 .addReg(oldval).addReg(dest); 4221 BuildMI(BB, TII->get(PPC::BCC)) 4222 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 4223 BB->addSuccessor(loop2MBB); 4224 BB->addSuccessor(midMBB); 4225 4226 BB = loop2MBB; 4227 BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4228 .addReg(newval).addReg(ptrA).addReg(ptrB); 4229 BuildMI(BB, TII->get(PPC::BCC)) 4230 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 4231 BuildMI(BB, TII->get(PPC::B)).addMBB(exitMBB); 4232 BB->addSuccessor(loop1MBB); 4233 BB->addSuccessor(exitMBB); 4234 4235 BB = midMBB; 4236 BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4237 .addReg(dest).addReg(ptrA).addReg(ptrB); 4238 BB->addSuccessor(exitMBB); 4239 4240 // exitMBB: 4241 // ... 4242 BB = exitMBB; 4243 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 4244 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 4245 // We must use 64-bit registers for addresses when targeting 64-bit, 4246 // since we're actually doing arithmetic on them. Other registers 4247 // can be 32-bit. 4248 bool is64bit = PPCSubTarget.isPPC64(); 4249 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 4250 4251 unsigned dest = MI->getOperand(0).getReg(); 4252 unsigned ptrA = MI->getOperand(1).getReg(); 4253 unsigned ptrB = MI->getOperand(2).getReg(); 4254 unsigned oldval = MI->getOperand(3).getReg(); 4255 unsigned newval = MI->getOperand(4).getReg(); 4256 4257 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 4258 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 4259 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 4260 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4261 F->insert(It, loop1MBB); 4262 F->insert(It, loop2MBB); 4263 F->insert(It, midMBB); 4264 F->insert(It, exitMBB); 4265 exitMBB->transferSuccessors(BB); 4266 4267 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4268 const TargetRegisterClass *RC = 4269 is64bit ? 
        (const TargetRegisterClass *) &PPC::G8RCRegClass :
        (const TargetRegisterClass *) &PPC::GPRCRegClass;
    unsigned PtrReg = RegInfo.createVirtualRegister(RC);
    unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
    unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
    unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
    unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
    unsigned MaskReg = RegInfo.createVirtualRegister(RC);
    unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
    unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
    unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
    unsigned Ptr1Reg;
    unsigned TmpReg = RegInfo.createVirtualRegister(RC);
    // thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loop1MBB);

    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word.  Hence all this nasty bookkeeping code.
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitBB:
    //   srw dest, tmpDest, shift
    if (ptrA!=PPC::R0) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
    BuildMI(BB, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
      .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
    BuildMI(BB, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
      .addReg(Shift1Reg).addImm(is8bit ?
                                        24 : 16);
    if (is64bit)
      BuildMI(BB, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
    BuildMI(BB, TII->get(PPC::SLW), NewVal2Reg)
      .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, TII->get(PPC::SLW), OldVal2Reg)
      .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
    }
    BuildMI(BB, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg).addReg(ShiftReg);
    BuildMI(BB, TII->get(PPC::AND), NewVal3Reg)
      .addReg(NewVal2Reg).addReg(MaskReg);
    BuildMI(BB, TII->get(PPC::AND), OldVal3Reg)
      .addReg(OldVal2Reg).addReg(MaskReg);

    BB = loop1MBB;
    BuildMI(BB, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(PPC::R0).addReg(PtrReg);
    BuildMI(BB, TII->get(PPC::AND), TmpReg).addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, TII->get(PPC::CMPW), PPC::CR0)
      .addReg(TmpReg).addReg(OldVal3Reg);
    BuildMI(BB, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, TII->get(PPC::ANDC), Tmp2Reg).addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp2Reg).addReg(NewVal3Reg);
    BuildMI(BB, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
      .addReg(PPC::R0).addReg(PtrReg);
    BuildMI(BB, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
    BuildMI(BB, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, TII->get(PPC::STWCX)).addReg(TmpDestReg)
      .addReg(PPC::R0).addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    // exitMBB:
    //   ...
    BB = exitMBB;
    BuildMI(BB, TII->get(PPC::SRW), dest).addReg(TmpReg).addReg(ShiftReg);
  } else {
    assert(0 && "Unexpected instr type to insert");
  }

  F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case PPCISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 << V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 >>u V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0 ||   //  0 >>s V -> 0.
          C->isAllOnesValue())    // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;

  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
        if (N->getOperand(0).getValueType() == MVT::i64 &&
            N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
          SDValue Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.getNode());
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.getNode());
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.getNode());
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val,
                              DAG.getIntPtrConstant(0));
            DCI.AddToWorklist(Val.getNode());
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32 &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDValue Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.getNode());
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.getNode());

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.getNode());
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16)) {
      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);

      return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
                         N->getOperand(2), N->getOperand(3),
                         DAG.getValueType(N->getOperand(1).getValueType()));
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
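      // The PPCISD::LBRX node carries (chain, base pointer, memoperand, VT of
      // the original load) and produces an i32 value plus a chain; per the
      // comment above it is matched to lhbrx/lwbrx later.  E.g. for
      // (i16 (bswap (i16 load ptr))) the 32-bit byte-reversed load result is
      // truncated back to i16 below.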
      std::vector<MVT> VTs;
      VTs.push_back(MVT::i32);
      VTs.push_back(MVT::Other);
      SDValue MO = DAG.getMemOperand(LD->getMemOperand());
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        MO,                // MemOperand
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away: we give it a bogus result value but a
      // real chain result.  The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    //
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use,
      // don't transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFCR instruction, we know this is safe.  Otherwise we
      // give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDValue(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFCR: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
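    // Rough illustration (hand-written example, not taken from a test):
    //   %p = call i32 @llvm.ppc.altivec.vcmpequw.p(i32 2, <4 x i32> %a,
    //                                              <4 x i32> %b)
    //   %c = icmp ne i32 %p, 0
    //   br i1 %c, label %t, label %f
    // should become a single record-form vcmpequw. followed by a conditional
    // branch on the relevant CR6 bit, with no mfcr in between.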
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      std::vector<MVT> VTs;
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
      VTs.push_back(LHS.getOperand(2).getValueType());
      VTs.push_back(MVT::Flag);
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
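    // (A byte-reversed load of a 16-bit value zero-fills the upper half of
    // its 32-bit result, so bits 16-31 are known to be zero.)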
    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}


/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, PPC::G8RCRegisterClass);
      return std::make_pair(0U, PPC::GPRCRegisterClass);
    case 'f':
      if (VT == MVT::f32)
        return std::make_pair(0U, PPC::F4RCRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, PPC::F8RCRegisterClass);
      break;
    case 'v':
      return std::make_pair(0U, PPC::VRRCRegisterClass);
    case 'y':   // crrc
      return std::make_pair(0U, PPC::CRRCRegisterClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}


/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0,0);
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    unsigned Value = CST->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
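      // Both forms require the low-order 16 bits to be zero, which is what
      // the (short)Value == 0 test below checks.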
      if ((short)Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value))
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,
                                                const Type *Ty) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V > -(1 << 16) && V < (1 << 16)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return false;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);

  // Make sure the function really does not optimize away the store of the RA
  // to the stack.
  FuncInfo->setLRStoreRequired();
  return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDValue();

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
              && MFI->getStackSize();

  if (isPPC64)
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::X31 : PPC::X1,
                              MVT::i64);
  else
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::R31 : PPC::R1,
                              MVT::i32);
}