PPCISelLowering.cpp revision 9b35a09e7e5b1aa26588e3852fe00a42b4f383ba
1//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the PPCISelLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "PPCISelLowering.h" 15#include "PPCMachineFunctionInfo.h" 16#include "PPCPredicates.h" 17#include "PPCTargetMachine.h" 18#include "PPCPerfectShuffle.h" 19#include "llvm/ADT/STLExtras.h" 20#include "llvm/ADT/VectorExtras.h" 21#include "llvm/CodeGen/CallingConvLower.h" 22#include "llvm/CodeGen/MachineFrameInfo.h" 23#include "llvm/CodeGen/MachineFunction.h" 24#include "llvm/CodeGen/MachineInstrBuilder.h" 25#include "llvm/CodeGen/MachineRegisterInfo.h" 26#include "llvm/CodeGen/PseudoSourceValue.h" 27#include "llvm/CodeGen/SelectionDAG.h" 28#include "llvm/CallingConv.h" 29#include "llvm/Constants.h" 30#include "llvm/Function.h" 31#include "llvm/Intrinsics.h" 32#include "llvm/Support/MathExtras.h" 33#include "llvm/Target/TargetOptions.h" 34#include "llvm/Target/TargetLoweringObjectFile.h" 35#include "llvm/Support/CommandLine.h" 36#include "llvm/Support/ErrorHandling.h" 37#include "llvm/Support/raw_ostream.h" 38#include "llvm/DerivedTypes.h" 39using namespace llvm; 40 41static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, EVT &ValVT, EVT &LocVT, 42 CCValAssign::LocInfo &LocInfo, 43 ISD::ArgFlagsTy &ArgFlags, 44 CCState &State); 45static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT, 46 EVT &LocVT, 47 CCValAssign::LocInfo &LocInfo, 48 ISD::ArgFlagsTy &ArgFlags, 49 CCState &State); 50static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT, 51 EVT &LocVT, 52 CCValAssign::LocInfo &LocInfo, 53 ISD::ArgFlagsTy &ArgFlags, 54 CCState &State); 55 56static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc", 57cl::desc("enable preincrement load/store generation on PPC (experimental)"), 58 cl::Hidden); 59 60static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) { 61 if (TM.getSubtargetImpl()->isDarwin()) 62 return new TargetLoweringObjectFileMachO(); 63 return new TargetLoweringObjectFileELF(); 64} 65 66 67PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) 68 : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) { 69 70 setPow2DivIsCheap(); 71 72 // Use _setjmp/_longjmp instead of setjmp/longjmp. 73 setUseUnderscoreSetJmp(true); 74 setUseUnderscoreLongJmp(true); 75 76 // Set up the register classes. 77 addRegisterClass(MVT::i32, PPC::GPRCRegisterClass); 78 addRegisterClass(MVT::f32, PPC::F4RCRegisterClass); 79 addRegisterClass(MVT::f64, PPC::F8RCRegisterClass); 80 81 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD 82 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 83 setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand); 84 85 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 86 87 // PowerPC has pre-inc load and store's. 
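// Illustrative example: a pre-increment form such as lwzu r4, 8(r3) loads from
// r3+8 and also writes the updated address back into r3; marking PRE_INC Legal
// below lets that address update be folded into the memory operation.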
88 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); 89 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); 90 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); 91 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); 92 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); 93 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); 94 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); 95 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); 96 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); 97 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); 98 99 // This is used in the ppcf128->int sequence. Note it has different semantics 100 // from FP_ROUND: that rounds to nearest, this rounds to zero. 101 setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom); 102 103 // PowerPC has no SREM/UREM instructions 104 setOperationAction(ISD::SREM, MVT::i32, Expand); 105 setOperationAction(ISD::UREM, MVT::i32, Expand); 106 setOperationAction(ISD::SREM, MVT::i64, Expand); 107 setOperationAction(ISD::UREM, MVT::i64, Expand); 108 109 // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM. 110 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 111 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 112 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 113 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 114 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 115 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 116 setOperationAction(ISD::UDIVREM, MVT::i64, Expand); 117 setOperationAction(ISD::SDIVREM, MVT::i64, Expand); 118 119 // We don't support sin/cos/sqrt/fmod/pow 120 setOperationAction(ISD::FSIN , MVT::f64, Expand); 121 setOperationAction(ISD::FCOS , MVT::f64, Expand); 122 setOperationAction(ISD::FREM , MVT::f64, Expand); 123 setOperationAction(ISD::FPOW , MVT::f64, Expand); 124 setOperationAction(ISD::FSIN , MVT::f32, Expand); 125 setOperationAction(ISD::FCOS , MVT::f32, Expand); 126 setOperationAction(ISD::FREM , MVT::f32, Expand); 127 setOperationAction(ISD::FPOW , MVT::f32, Expand); 128 129 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 130 131 // If we're enabling GP optimizations, use hardware square root 132 if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) { 133 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 134 setOperationAction(ISD::FSQRT, MVT::f32, Expand); 135 } 136 137 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 138 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 139 140 // PowerPC does not have BSWAP, CTPOP or CTTZ 141 setOperationAction(ISD::BSWAP, MVT::i32 , Expand); 142 setOperationAction(ISD::CTPOP, MVT::i32 , Expand); 143 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 144 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); 145 setOperationAction(ISD::CTPOP, MVT::i64 , Expand); 146 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 147 148 // PowerPC does not have ROTR 149 setOperationAction(ISD::ROTR, MVT::i32 , Expand); 150 setOperationAction(ISD::ROTR, MVT::i64 , Expand); 151 152 // PowerPC does not have Select 153 setOperationAction(ISD::SELECT, MVT::i32, Expand); 154 setOperationAction(ISD::SELECT, MVT::i64, Expand); 155 setOperationAction(ISD::SELECT, MVT::f32, Expand); 156 setOperationAction(ISD::SELECT, MVT::f64, Expand); 157 158 // PowerPC wants to turn select_cc of FP into fsel when possible. 
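// Illustrative example: fsel computes (fA >= 0.0) ? fC : fB in a single
// instruction, so an FP select such as (x >= 0.0) ? y : z can be emitted
// branch-free; the Custom hooks below rewrite suitable select_cc nodes into
// that form.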
159 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 160 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 161 162 // PowerPC wants to optimize integer setcc a bit 163 setOperationAction(ISD::SETCC, MVT::i32, Custom); 164 165 // PowerPC does not have BRCOND which requires SetCC 166 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 167 168 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 169 170 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. 171 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 172 173 // PowerPC does not have [U|S]INT_TO_FP 174 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); 175 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); 176 177 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand); 178 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand); 179 setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand); 180 setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand); 181 182 // We cannot sextinreg(i1). Expand to shifts. 183 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 184 185 // Support label based line numbers. 186 setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand); 187 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 188 189 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 190 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 191 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 192 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 193 194 195 // We want to legalize GlobalAddress and ConstantPool nodes into the 196 // appropriate instructions to materialize the address. 197 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 198 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 199 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 200 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 201 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 202 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 203 setOperationAction(ISD::ConstantPool, MVT::i64, Custom); 204 setOperationAction(ISD::JumpTable, MVT::i64, Custom); 205 206 // TRAP is legal. 207 setOperationAction(ISD::TRAP, MVT::Other, Legal); 208 209 // TRAMPOLINE is custom lowered. 210 setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom); 211 212 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 213 setOperationAction(ISD::VASTART , MVT::Other, Custom); 214 215 // VAARG is custom lowered with the 32-bit SVR4 ABI. 216 if ( TM.getSubtarget<PPCSubtarget>().isSVR4ABI() 217 && !TM.getSubtarget<PPCSubtarget>().isPPC64()) 218 setOperationAction(ISD::VAARG, MVT::Other, Custom); 219 else 220 setOperationAction(ISD::VAARG, MVT::Other, Expand); 221 222 // Use the default implementation. 223 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 224 setOperationAction(ISD::VAEND , MVT::Other, Expand); 225 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 226 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); 227 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 228 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); 229 230 // We want to custom lower some of our intrinsics. 231 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 232 233 // Comparisons that require checking two conditions. 
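// Illustrative example: SETULT ("unordered or less than") has no single CR
// bit, so marking it Expand makes the legalizer split it into an ordered
// less-than check and an unordered check whose results are ORed together.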
234 setCondCodeAction(ISD::SETULT, MVT::f32, Expand); 235 setCondCodeAction(ISD::SETULT, MVT::f64, Expand); 236 setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); 237 setCondCodeAction(ISD::SETUGT, MVT::f64, Expand); 238 setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand); 239 setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand); 240 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); 241 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); 242 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); 243 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); 244 setCondCodeAction(ISD::SETONE, MVT::f32, Expand); 245 setCondCodeAction(ISD::SETONE, MVT::f64, Expand); 246 247 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 248 // They also have instructions for converting between i64 and fp. 249 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 250 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); 251 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 252 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); 253 // This is just the low 32 bits of a (signed) fp->i64 conversion. 254 // We cannot do this with Promote because i64 is not a legal type. 255 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 256 257 // FIXME: disable this lowered code. This generates 64-bit register values, 258 // and we don't model the fact that the top part is clobbered by calls. We 259 // need to flag these together so that the value isn't live across a call. 260 //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 261 } else { 262 // PowerPC does not have FP_TO_UINT on 32-bit implementations. 263 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 264 } 265 266 if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) { 267 // 64-bit PowerPC implementations can support i64 types directly 268 addRegisterClass(MVT::i64, PPC::G8RCRegisterClass); 269 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or 270 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 271 // 64-bit PowerPC wants to expand i128 shifts itself. 272 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); 273 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); 274 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); 275 } else { 276 // 32-bit PowerPC wants to expand i64 shifts itself. 277 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 278 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 279 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 280 } 281 282 if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) { 283 // First set operation action for all vector types to expand. Then we 284 // will selectively turn on ones that can be effectively codegen'd. 285 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; 286 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 287 MVT::SimpleValueType VT = (MVT::SimpleValueType)i; 288 289 // add/sub are legal for all supported vector VT's. 290 setOperationAction(ISD::ADD , VT, Legal); 291 setOperationAction(ISD::SUB , VT, Legal); 292 293 // We promote all shuffles to v16i8. 294 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); 295 AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); 296 297 // We promote all non-typed operations to v4i32. 
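// Illustrative example: "promoting" a bitwise op means its operands are
// bitcast to v4i32, the operation is performed there (where it is Legal, see
// below), and the result is bitcast back; the bit pattern is unchanged, so an
// AND of two v16i8 values still costs a single vand.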
298 setOperationAction(ISD::AND , VT, Promote); 299 AddPromotedToType (ISD::AND , VT, MVT::v4i32); 300 setOperationAction(ISD::OR , VT, Promote); 301 AddPromotedToType (ISD::OR , VT, MVT::v4i32); 302 setOperationAction(ISD::XOR , VT, Promote); 303 AddPromotedToType (ISD::XOR , VT, MVT::v4i32); 304 setOperationAction(ISD::LOAD , VT, Promote); 305 AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); 306 setOperationAction(ISD::SELECT, VT, Promote); 307 AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); 308 setOperationAction(ISD::STORE, VT, Promote); 309 AddPromotedToType (ISD::STORE, VT, MVT::v4i32); 310 311 // No other operations are legal. 312 setOperationAction(ISD::MUL , VT, Expand); 313 setOperationAction(ISD::SDIV, VT, Expand); 314 setOperationAction(ISD::SREM, VT, Expand); 315 setOperationAction(ISD::UDIV, VT, Expand); 316 setOperationAction(ISD::UREM, VT, Expand); 317 setOperationAction(ISD::FDIV, VT, Expand); 318 setOperationAction(ISD::FNEG, VT, Expand); 319 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); 320 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); 321 setOperationAction(ISD::BUILD_VECTOR, VT, Expand); 322 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 323 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 324 setOperationAction(ISD::UDIVREM, VT, Expand); 325 setOperationAction(ISD::SDIVREM, VT, Expand); 326 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); 327 setOperationAction(ISD::FPOW, VT, Expand); 328 setOperationAction(ISD::CTPOP, VT, Expand); 329 setOperationAction(ISD::CTLZ, VT, Expand); 330 setOperationAction(ISD::CTTZ, VT, Expand); 331 } 332 333 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle 334 // with merges, splats, etc. 335 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); 336 337 setOperationAction(ISD::AND , MVT::v4i32, Legal); 338 setOperationAction(ISD::OR , MVT::v4i32, Legal); 339 setOperationAction(ISD::XOR , MVT::v4i32, Legal); 340 setOperationAction(ISD::LOAD , MVT::v4i32, Legal); 341 setOperationAction(ISD::SELECT, MVT::v4i32, Expand); 342 setOperationAction(ISD::STORE , MVT::v4i32, Legal); 343 344 addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass); 345 addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass); 346 addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass); 347 addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass); 348 349 setOperationAction(ISD::MUL, MVT::v4f32, Legal); 350 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 351 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 352 setOperationAction(ISD::MUL, MVT::v16i8, Custom); 353 354 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); 355 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); 356 357 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); 358 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); 359 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); 360 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 361 } 362 363 setShiftAmountType(MVT::i32); 364 setBooleanContents(ZeroOrOneBooleanContent); 365 366 if (TM.getSubtarget<PPCSubtarget>().isPPC64()) { 367 setStackPointerRegisterToSaveRestore(PPC::X1); 368 setExceptionPointerRegister(PPC::X3); 369 setExceptionSelectorRegister(PPC::X4); 370 } else { 371 setStackPointerRegisterToSaveRestore(PPC::R1); 372 setExceptionPointerRegister(PPC::R3); 373 setExceptionSelectorRegister(PPC::R4); 374 } 375 376 // We have target-specific dag combine patterns for the following nodes: 377 setTargetDAGCombine(ISD::SINT_TO_FP); 378 
setTargetDAGCombine(ISD::STORE); 379 setTargetDAGCombine(ISD::BR_CC); 380 setTargetDAGCombine(ISD::BSWAP); 381 382 // Darwin long double math library functions have $LDBL128 appended. 383 if (TM.getSubtarget<PPCSubtarget>().isDarwin()) { 384 setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128"); 385 setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128"); 386 setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128"); 387 setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128"); 388 setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128"); 389 setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128"); 390 setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128"); 391 setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128"); 392 setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128"); 393 setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128"); 394 } 395 396 computeRegisterProperties(); 397} 398 399/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 400/// function arguments in the caller parameter area. 401unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const { 402 TargetMachine &TM = getTargetMachine(); 403 // Darwin passes everything on 4 byte boundary. 404 if (TM.getSubtarget<PPCSubtarget>().isDarwin()) 405 return 4; 406 // FIXME SVR4 TBD 407 return 4; 408} 409 410const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { 411 switch (Opcode) { 412 default: return 0; 413 case PPCISD::FSEL: return "PPCISD::FSEL"; 414 case PPCISD::FCFID: return "PPCISD::FCFID"; 415 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; 416 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; 417 case PPCISD::STFIWX: return "PPCISD::STFIWX"; 418 case PPCISD::VMADDFP: return "PPCISD::VMADDFP"; 419 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP"; 420 case PPCISD::VPERM: return "PPCISD::VPERM"; 421 case PPCISD::Hi: return "PPCISD::Hi"; 422 case PPCISD::Lo: return "PPCISD::Lo"; 423 case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY"; 424 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 425 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 426 case PPCISD::SRL: return "PPCISD::SRL"; 427 case PPCISD::SRA: return "PPCISD::SRA"; 428 case PPCISD::SHL: return "PPCISD::SHL"; 429 case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32"; 430 case PPCISD::STD_32: return "PPCISD::STD_32"; 431 case PPCISD::CALL_SVR4: return "PPCISD::CALL_SVR4"; 432 case PPCISD::CALL_Darwin: return "PPCISD::CALL_Darwin"; 433 case PPCISD::NOP: return "PPCISD::NOP"; 434 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 435 case PPCISD::BCTRL_Darwin: return "PPCISD::BCTRL_Darwin"; 436 case PPCISD::BCTRL_SVR4: return "PPCISD::BCTRL_SVR4"; 437 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 438 case PPCISD::MFCR: return "PPCISD::MFCR"; 439 case PPCISD::VCMP: return "PPCISD::VCMP"; 440 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 441 case PPCISD::LBRX: return "PPCISD::LBRX"; 442 case PPCISD::STBRX: return "PPCISD::STBRX"; 443 case PPCISD::LARX: return "PPCISD::LARX"; 444 case PPCISD::STCX: return "PPCISD::STCX"; 445 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 446 case PPCISD::MFFS: return "PPCISD::MFFS"; 447 case PPCISD::MTFSB0: return "PPCISD::MTFSB0"; 448 case PPCISD::MTFSB1: return "PPCISD::MTFSB1"; 449 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 450 case PPCISD::MTFSF: return "PPCISD::MTFSF"; 451 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 452 } 453} 454 455MVT::SimpleValueType PPCTargetLowering::getSetCCResultType(EVT VT) const { 456 return MVT::i32; 457} 458 459/// getFunctionAlignment - Return the Log2 alignment of this 
function. 460unsigned PPCTargetLowering::getFunctionAlignment(const Function *F) const { 461 if (getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) 462 return F->hasFnAttr(Attribute::OptimizeForSize) ? 2 : 4; 463 else 464 return 2; 465} 466 467/// getPreferredLSDADataFormat - Return the preferred exception handling data 468/// format for the LSDA. 469unsigned PPCTargetLowering::getPreferredLSDADataFormat() const { 470 if (getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) 471 return dwarf::DW_EH_PE_pcrel; 472 473 if (PPCSubTarget.isPPC64() || 474 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 475 unsigned DataTy = 476 (PPCSubTarget.isPPC64() ? 477 dwarf::DW_EH_PE_udata8 : dwarf::DW_EH_PE_udata4); 478 return dwarf::DW_EH_PE_pcrel | DataTy; 479 } 480 481 return dwarf::DW_EH_PE_absptr; 482} 483 484/// getPreferredFDEDataFormat - Return the preferred exception handling data 485/// format for the FDE. 486unsigned PPCTargetLowering::getPreferredFDEDataFormat() const { 487 if (getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) 488 return dwarf::DW_EH_PE_pcrel; 489 490 if (PPCSubTarget.isPPC64() || 491 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 492 unsigned DataTy = 493 (PPCSubTarget.isPPC64() ? 494 dwarf::DW_EH_PE_udata8 : dwarf::DW_EH_PE_udata4); 495 return dwarf::DW_EH_PE_pcrel | DataTy; 496 } 497 498 return dwarf::DW_EH_PE_absptr; 499} 500 501//===----------------------------------------------------------------------===// 502// Node matching predicates, for use by the tblgen matching code. 503//===----------------------------------------------------------------------===// 504 505/// isFloatingPointZero - Return true if this is 0.0 or -0.0. 506static bool isFloatingPointZero(SDValue Op) { 507 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 508 return CFP->getValueAPF().isZero(); 509 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 510 // Maybe this has already been legalized into the constant pool? 511 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 512 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 513 return CFP->getValueAPF().isZero(); 514 } 515 return false; 516} 517 518/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 519/// true if Op is undef or if it matches the specified value. 520static bool isConstantOrUndef(int Op, int Val) { 521 return Op < 0 || Op == Val; 522} 523 524/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 525/// VPKUHUM instruction. 526bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) { 527 if (!isUnary) { 528 for (unsigned i = 0; i != 16; ++i) 529 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 530 return false; 531 } else { 532 for (unsigned i = 0; i != 8; ++i) 533 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) || 534 !isConstantOrUndef(N->getMaskElt(i+8), i*2+1)) 535 return false; 536 } 537 return true; 538} 539 540/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 541/// VPKUWUM instruction. 
542bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) { 543 if (!isUnary) { 544 for (unsigned i = 0; i != 16; i += 2) 545 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 546 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 547 return false; 548 } else { 549 for (unsigned i = 0; i != 8; i += 2) 550 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 551 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) || 552 !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) || 553 !isConstantOrUndef(N->getMaskElt(i+9), i*2+3)) 554 return false; 555 } 556 return true; 557} 558 559/// isVMerge - Common function, used to match vmrg* shuffles. 560/// 561static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 562 unsigned LHSStart, unsigned RHSStart) { 563 assert(N->getValueType(0) == MVT::v16i8 && 564 "PPC only supports shuffles by bytes!"); 565 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 566 "Unsupported merge size!"); 567 568 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 569 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 570 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 571 LHSStart+j+i*UnitSize) || 572 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 573 RHSStart+j+i*UnitSize)) 574 return false; 575 } 576 return true; 577} 578 579/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 580/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 581bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 582 bool isUnary) { 583 if (!isUnary) 584 return isVMerge(N, UnitSize, 8, 24); 585 return isVMerge(N, UnitSize, 8, 8); 586} 587 588/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 589/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). 590bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 591 bool isUnary) { 592 if (!isUnary) 593 return isVMerge(N, UnitSize, 0, 16); 594 return isVMerge(N, UnitSize, 0, 0); 595} 596 597 598/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 599/// amount, otherwise return -1. 600int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) { 601 assert(N->getValueType(0) == MVT::v16i8 && 602 "PPC only supports shuffles by bytes!"); 603 604 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 605 606 // Find the first non-undef value in the shuffle mask. 607 unsigned i; 608 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 609 /*search*/; 610 611 if (i == 16) return -1; // all undef. 612 613 // Otherwise, check to see if the rest of the elements are consecutively 614 // numbered from this value. 615 unsigned ShiftAmt = SVOp->getMaskElt(i); 616 if (ShiftAmt < i) return -1; 617 ShiftAmt -= i; 618 619 if (!isUnary) { 620 // Check the rest of the elements to see if they are consecutive. 621 for (++i; i != 16; ++i) 622 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 623 return -1; 624 } else { 625 // Check the rest of the elements to see if they are consecutive. 626 for (++i; i != 16; ++i) 627 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 628 return -1; 629 } 630 return ShiftAmt; 631} 632 633/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 634/// specifies a splat of a single element that is suitable for input to 635/// VSPLTB/VSPLTH/VSPLTW. 
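/// Illustrative example: splatting word element 2 of a v4i32 vector uses the
/// v16i8 mask {8,9,10,11, 8,9,10,11, 8,9,10,11, 8,9,10,11}; getVSPLTImmediate
/// below then reports 8/4 == 2 as the vspltw immediate.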
636bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 637 assert(N->getValueType(0) == MVT::v16i8 && 638 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 639 640 // This is a splat operation if each element of the permute is the same, and 641 // if the value doesn't reference the second vector. 642 unsigned ElementBase = N->getMaskElt(0); 643 644 // FIXME: Handle UNDEF elements too! 645 if (ElementBase >= 16) 646 return false; 647 648 // Check that the indices are consecutive, in the case of a multi-byte element 649 // splatted with a v16i8 mask. 650 for (unsigned i = 1; i != EltSize; ++i) 651 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 652 return false; 653 654 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 655 if (N->getMaskElt(i) < 0) continue; 656 for (unsigned j = 0; j != EltSize; ++j) 657 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 658 return false; 659 } 660 return true; 661} 662 663/// isAllNegativeZeroVector - Returns true if all elements of build_vector 664/// are -0.0. 665bool PPC::isAllNegativeZeroVector(SDNode *N) { 666 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N); 667 668 APInt APVal, APUndef; 669 unsigned BitSize; 670 bool HasAnyUndefs; 671 672 if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32)) 673 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) 674 return CFP->getValueAPF().isNegZero(); 675 676 return false; 677} 678 679/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 680/// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 681unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) { 682 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 683 assert(isSplatShuffleMask(SVOp, EltSize)); 684 return SVOp->getMaskElt(0) / EltSize; 685} 686 687/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 688/// by using a vspltis[bhw] instruction of the specified element size, return 689/// the constant being splatted. The ByteSize field indicates the number of 690/// bytes of each element [124] -> [bhw]. 691SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 692 SDValue OpVal(0, 0); 693 694 // If ByteSize of the splat is bigger than the element size of the 695 // build_vector, then we have a case where we are checking for a splat where 696 // multiple elements of the buildvector are folded together into a single 697 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 698 unsigned EltSize = 16/N->getNumOperands(); 699 if (EltSize < ByteSize) { 700 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 701 SDValue UniquedVals[4]; 702 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 703 704 // See if all of the elements in the buildvector agree across. 705 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 706 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 707 // If the element isn't a constant, bail fully out. 708 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 709 710 711 if (UniquedVals[i&(Multiple-1)].getNode() == 0) 712 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 713 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 714 return SDValue(); // no match. 715 } 716 717 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 718 // either constant or undef values that are identical for each chunk. See 719 // if these chunks can form into a larger vspltis*. 
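// Illustrative example: a v8i16 build_vector {0,2, 0,2, 0,2, 0,2} queried
// with ByteSize == 4 uniques to the halfword pair (0, 2); the leading half is
// zero and the trailing value 2 is < 16, so the whole vector can be
// materialized as vspltisw(2).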
720 721 // Check to see if all of the leading entries are either 0 or -1. If 722 // neither, then this won't fit into the immediate field. 723 bool LeadingZero = true; 724 bool LeadingOnes = true; 725 for (unsigned i = 0; i != Multiple-1; ++i) { 726 if (UniquedVals[i].getNode() == 0) continue; // Must have been undefs. 727 728 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue(); 729 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue(); 730 } 731 // Finally, check the least significant entry. 732 if (LeadingZero) { 733 if (UniquedVals[Multiple-1].getNode() == 0) 734 return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef 735 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 736 if (Val < 16) 737 return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4) 738 } 739 if (LeadingOnes) { 740 if (UniquedVals[Multiple-1].getNode() == 0) 741 return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef 742 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 743 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 744 return DAG.getTargetConstant(Val, MVT::i32); 745 } 746 747 return SDValue(); 748 } 749 750 // Check to see if this buildvec has a single non-undef value in its elements. 751 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 752 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 753 if (OpVal.getNode() == 0) 754 OpVal = N->getOperand(i); 755 else if (OpVal != N->getOperand(i)) 756 return SDValue(); 757 } 758 759 if (OpVal.getNode() == 0) return SDValue(); // All UNDEF: use implicit def. 760 761 unsigned ValSizeInBytes = EltSize; 762 uint64_t Value = 0; 763 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 764 Value = CN->getZExtValue(); 765 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 766 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 767 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 768 } 769 770 // If the splat value is larger than the element value, then we can never do 771 // this splat. The only case that we could fit the replicated bits into our 772 // immediate field for would be zero, and we prefer to use vxor for it. 773 if (ValSizeInBytes < ByteSize) return SDValue(); 774 775 // If the element value is larger than the splat value, cut it in half and 776 // check to see if the two halves are equal. Continue doing this until we 777 // get to ByteSize. This allows us to handle 0x01010101 as 0x01. 778 while (ValSizeInBytes > ByteSize) { 779 ValSizeInBytes >>= 1; 780 781 // If the top half equals the bottom half, we're still ok. 782 if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) != 783 (Value & ((1 << (8*ValSizeInBytes))-1))) 784 return SDValue(); 785 } 786 787 // Properly sign extend the value. 788 int ShAmt = (4-ByteSize)*8; 789 int MaskVal = ((int)Value << ShAmt) >> ShAmt; 790 791 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 
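// Illustrative note: an all-zero vector is cheaper to materialize as
// vxor vD, vD, vD, which is why a zero splat value is rejected here.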
792 if (MaskVal == 0) return SDValue(); 793 794 // Finally, if this value fits in a 5 bit sext field, return it 795 if (((MaskVal << (32-5)) >> (32-5)) == MaskVal) 796 return DAG.getTargetConstant(MaskVal, MVT::i32); 797 return SDValue(); 798} 799 800//===----------------------------------------------------------------------===// 801// Addressing Mode Selection 802//===----------------------------------------------------------------------===// 803 804/// isIntS16Immediate - This method tests to see if the node is either a 32-bit 805/// or 64-bit immediate, and if the value can be accurately represented as a 806/// sign extension from a 16-bit value. If so, this returns true and the 807/// immediate. 808static bool isIntS16Immediate(SDNode *N, short &Imm) { 809 if (N->getOpcode() != ISD::Constant) 810 return false; 811 812 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue(); 813 if (N->getValueType(0) == MVT::i32) 814 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 815 else 816 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 817} 818static bool isIntS16Immediate(SDValue Op, short &Imm) { 819 return isIntS16Immediate(Op.getNode(), Imm); 820} 821 822 823/// SelectAddressRegReg - Given the specified addressed, check to see if it 824/// can be represented as an indexed [r+r] operation. Returns false if it 825/// can be more efficiently represented with [r+imm]. 826bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 827 SDValue &Index, 828 SelectionDAG &DAG) const { 829 short imm = 0; 830 if (N.getOpcode() == ISD::ADD) { 831 if (isIntS16Immediate(N.getOperand(1), imm)) 832 return false; // r+i 833 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 834 return false; // r+i 835 836 Base = N.getOperand(0); 837 Index = N.getOperand(1); 838 return true; 839 } else if (N.getOpcode() == ISD::OR) { 840 if (isIntS16Immediate(N.getOperand(1), imm)) 841 return false; // r+i can fold it if we can. 842 843 // If this is an or of disjoint bitfields, we can codegen this as an add 844 // (for better address arithmetic) if the LHS and RHS of the OR are provably 845 // disjoint. 846 APInt LHSKnownZero, LHSKnownOne; 847 APInt RHSKnownZero, RHSKnownOne; 848 DAG.ComputeMaskedBits(N.getOperand(0), 849 APInt::getAllOnesValue(N.getOperand(0) 850 .getValueSizeInBits()), 851 LHSKnownZero, LHSKnownOne); 852 853 if (LHSKnownZero.getBoolValue()) { 854 DAG.ComputeMaskedBits(N.getOperand(1), 855 APInt::getAllOnesValue(N.getOperand(1) 856 .getValueSizeInBits()), 857 RHSKnownZero, RHSKnownOne); 858 // If all of the bits are known zero on the LHS or RHS, the add won't 859 // carry. 860 if (~(LHSKnownZero | RHSKnownZero) == 0) { 861 Base = N.getOperand(0); 862 Index = N.getOperand(1); 863 return true; 864 } 865 } 866 } 867 868 return false; 869} 870 871/// Returns true if the address N can be represented by a base register plus 872/// a signed 16-bit displacement [r+imm], and if it is not better 873/// represented as reg+reg. 874bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 875 SDValue &Base, 876 SelectionDAG &DAG) const { 877 // FIXME dl should come from parent load or store, not from address 878 DebugLoc dl = N.getDebugLoc(); 879 // If this can be more profitably realized as r+r, fail. 
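// Illustrative note: the r+imm form corresponds to D-form memory ops (e.g.
// lwz r3, 16(r1)) while r+r corresponds to X-form ops (e.g. lwzx r3, r1, r4);
// an OR whose operands share no set bits behaves exactly like an ADD, since
// disjoint bits cannot produce a carry, which is why OR is also handled below.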
880 if (SelectAddressRegReg(N, Disp, Base, DAG)) 881 return false; 882 883 if (N.getOpcode() == ISD::ADD) { 884 short imm = 0; 885 if (isIntS16Immediate(N.getOperand(1), imm)) { 886 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 887 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 888 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 889 } else { 890 Base = N.getOperand(0); 891 } 892 return true; // [r+i] 893 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 894 // Match LOAD (ADD (X, Lo(G))). 895 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 896 && "Cannot handle constant offsets yet!"); 897 Disp = N.getOperand(1).getOperand(0); // The global address. 898 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 899 Disp.getOpcode() == ISD::TargetConstantPool || 900 Disp.getOpcode() == ISD::TargetJumpTable); 901 Base = N.getOperand(0); 902 return true; // [&g+r] 903 } 904 } else if (N.getOpcode() == ISD::OR) { 905 short imm = 0; 906 if (isIntS16Immediate(N.getOperand(1), imm)) { 907 // If this is an or of disjoint bitfields, we can codegen this as an add 908 // (for better address arithmetic) if the LHS and RHS of the OR are 909 // provably disjoint. 910 APInt LHSKnownZero, LHSKnownOne; 911 DAG.ComputeMaskedBits(N.getOperand(0), 912 APInt::getAllOnesValue(N.getOperand(0) 913 .getValueSizeInBits()), 914 LHSKnownZero, LHSKnownOne); 915 916 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 917 // If all of the bits are known zero on the LHS or RHS, the add won't 918 // carry. 919 Base = N.getOperand(0); 920 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32); 921 return true; 922 } 923 } 924 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 925 // Loading from a constant address. 926 927 // If this address fits entirely in a 16-bit sext immediate field, codegen 928 // this as "d, 0" 929 short Imm; 930 if (isIntS16Immediate(CN, Imm)) { 931 Disp = DAG.getTargetConstant(Imm, CN->getValueType(0)); 932 Base = DAG.getRegister(PPC::R0, CN->getValueType(0)); 933 return true; 934 } 935 936 // Handle 32-bit sext immediates with LIS + addr mode. 937 if (CN->getValueType(0) == MVT::i32 || 938 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) { 939 int Addr = (int)CN->getZExtValue(); 940 941 // Otherwise, break this down into an LIS + disp. 942 Disp = DAG.getTargetConstant((short)Addr, MVT::i32); 943 944 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32); 945 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 946 Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base), 0); 947 return true; 948 } 949 } 950 951 Disp = DAG.getTargetConstant(0, getPointerTy()); 952 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 953 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 954 else 955 Base = N; 956 return true; // [r+0] 957} 958 959/// SelectAddressRegRegOnly - Given the specified addressed, force it to be 960/// represented as an indexed [r+r] operation. 961bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 962 SDValue &Index, 963 SelectionDAG &DAG) const { 964 // Check to see if we can easily represent this as an [r+r] address. This 965 // will fail if it thinks that the address is more profitably represented as 966 // reg+imm, e.g. where imm = 0. 
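// Illustrative example: the absolute address 0x00008000 does not fit in a
// signed 16-bit displacement, so it is split into lis <base>, 1 (giving
// 0x00010000) plus the displacement -0x8000; in this DS-form variant the
// displacement is additionally stored shifted right by 2.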
967 if (SelectAddressRegReg(N, Base, Index, DAG)) 968 return true; 969 970 // If the operand is an addition, always emit this as [r+r], since this is 971 // better (for code size, and execution, as the memop does the add for free) 972 // than emitting an explicit add. 973 if (N.getOpcode() == ISD::ADD) { 974 Base = N.getOperand(0); 975 Index = N.getOperand(1); 976 return true; 977 } 978 979 // Otherwise, do it the hard way, using R0 as the base register. 980 Base = DAG.getRegister(PPC::R0, N.getValueType()); 981 Index = N; 982 return true; 983} 984 985/// SelectAddressRegImmShift - Returns true if the address N can be 986/// represented by a base register plus a signed 14-bit displacement 987/// [r+imm*4]. Suitable for use by STD and friends. 988bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp, 989 SDValue &Base, 990 SelectionDAG &DAG) const { 991 // FIXME dl should come from the parent load or store, not the address 992 DebugLoc dl = N.getDebugLoc(); 993 // If this can be more profitably realized as r+r, fail. 994 if (SelectAddressRegReg(N, Disp, Base, DAG)) 995 return false; 996 997 if (N.getOpcode() == ISD::ADD) { 998 short imm = 0; 999 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 1000 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 1001 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1002 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1003 } else { 1004 Base = N.getOperand(0); 1005 } 1006 return true; // [r+i] 1007 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1008 // Match LOAD (ADD (X, Lo(G))). 1009 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1010 && "Cannot handle constant offsets yet!"); 1011 Disp = N.getOperand(1).getOperand(0); // The global address. 1012 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1013 Disp.getOpcode() == ISD::TargetConstantPool || 1014 Disp.getOpcode() == ISD::TargetJumpTable); 1015 Base = N.getOperand(0); 1016 return true; // [&g+r] 1017 } 1018 } else if (N.getOpcode() == ISD::OR) { 1019 short imm = 0; 1020 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) { 1021 // If this is an or of disjoint bitfields, we can codegen this as an add 1022 // (for better address arithmetic) if the LHS and RHS of the OR are 1023 // provably disjoint. 1024 APInt LHSKnownZero, LHSKnownOne; 1025 DAG.ComputeMaskedBits(N.getOperand(0), 1026 APInt::getAllOnesValue(N.getOperand(0) 1027 .getValueSizeInBits()), 1028 LHSKnownZero, LHSKnownOne); 1029 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1030 // If all of the bits are known zero on the LHS or RHS, the add won't 1031 // carry. 1032 Base = N.getOperand(0); 1033 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32); 1034 return true; 1035 } 1036 } 1037 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1038 // Loading from a constant address. Verify low two bits are clear. 1039 if ((CN->getZExtValue() & 3) == 0) { 1040 // If this address fits entirely in a 14-bit sext immediate field, codegen 1041 // this as "d, 0" 1042 short Imm; 1043 if (isIntS16Immediate(CN, Imm)) { 1044 Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy()); 1045 Base = DAG.getRegister(PPC::R0, CN->getValueType(0)); 1046 return true; 1047 } 1048 1049 // Fold the low-part of 32-bit absolute addresses into addr mode. 
1050 if (CN->getValueType(0) == MVT::i32 || 1051 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) { 1052 int Addr = (int)CN->getZExtValue(); 1053 1054 // Otherwise, break this down into an LIS + disp. 1055 Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32); 1056 Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32); 1057 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 1058 Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base),0); 1059 return true; 1060 } 1061 } 1062 } 1063 1064 Disp = DAG.getTargetConstant(0, getPointerTy()); 1065 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) 1066 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1067 else 1068 Base = N; 1069 return true; // [r+0] 1070} 1071 1072 1073/// getPreIndexedAddressParts - returns true by value, base pointer and 1074/// offset pointer and addressing mode by reference if the node's address 1075/// can be legally represented as pre-indexed load / store address. 1076bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1077 SDValue &Offset, 1078 ISD::MemIndexedMode &AM, 1079 SelectionDAG &DAG) const { 1080 // Disabled by default for now. 1081 if (!EnablePPCPreinc) return false; 1082 1083 SDValue Ptr; 1084 EVT VT; 1085 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1086 Ptr = LD->getBasePtr(); 1087 VT = LD->getMemoryVT(); 1088 1089 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1090 ST = ST; 1091 Ptr = ST->getBasePtr(); 1092 VT = ST->getMemoryVT(); 1093 } else 1094 return false; 1095 1096 // PowerPC doesn't have preinc load/store instructions for vectors. 1097 if (VT.isVector()) 1098 return false; 1099 1100 // TODO: Check reg+reg first. 1101 1102 // LDU/STU use reg+imm*4, others use reg+imm. 1103 if (VT != MVT::i64) { 1104 // reg + imm 1105 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG)) 1106 return false; 1107 } else { 1108 // reg + imm * 4. 1109 if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG)) 1110 return false; 1111 } 1112 1113 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1114 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1115 // sext i32 to i64 when addr mode is r+i. 1116 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1117 LD->getExtensionType() == ISD::SEXTLOAD && 1118 isa<ConstantSDNode>(Offset)) 1119 return false; 1120 } 1121 1122 AM = ISD::PRE_INC; 1123 return true; 1124} 1125 1126//===----------------------------------------------------------------------===// 1127// LowerOperation implementation 1128//===----------------------------------------------------------------------===// 1129 1130SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 1131 SelectionDAG &DAG) { 1132 EVT PtrVT = Op.getValueType(); 1133 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 1134 Constant *C = CP->getConstVal(); 1135 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); 1136 SDValue Zero = DAG.getConstant(0, PtrVT); 1137 // FIXME there isn't really any debug info here 1138 DebugLoc dl = Op.getDebugLoc(); 1139 1140 const TargetMachine &TM = DAG.getTarget(); 1141 1142 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, CPI, Zero); 1143 SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, CPI, Zero); 1144 1145 // If this is a non-darwin platform, we don't support non-static relo models 1146 // yet. 
1147 if (TM.getRelocationModel() == Reloc::Static || 1148 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 1149 // Generate non-pic code that has direct accesses to the constant pool. 1150 // The address of the global is just (hi(&g)+lo(&g)). 1151 return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo); 1152 } 1153 1154 if (TM.getRelocationModel() == Reloc::PIC_) { 1155 // With PIC, the first instruction is actually "GR+hi(&G)". 1156 Hi = DAG.getNode(ISD::ADD, dl, PtrVT, 1157 DAG.getNode(PPCISD::GlobalBaseReg, 1158 DebugLoc::getUnknownLoc(), PtrVT), Hi); 1159 } 1160 1161 Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo); 1162 return Lo; 1163} 1164 1165SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) { 1166 EVT PtrVT = Op.getValueType(); 1167 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 1168 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1169 SDValue Zero = DAG.getConstant(0, PtrVT); 1170 // FIXME there isn't really any debug loc here 1171 DebugLoc dl = Op.getDebugLoc(); 1172 1173 const TargetMachine &TM = DAG.getTarget(); 1174 1175 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, JTI, Zero); 1176 SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, JTI, Zero); 1177 1178 // If this is a non-darwin platform, we don't support non-static relo models 1179 // yet. 1180 if (TM.getRelocationModel() == Reloc::Static || 1181 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 1182 // Generate non-pic code that has direct accesses to the constant pool. 1183 // The address of the global is just (hi(&g)+lo(&g)). 1184 return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo); 1185 } 1186 1187 if (TM.getRelocationModel() == Reloc::PIC_) { 1188 // With PIC, the first instruction is actually "GR+hi(&G)". 1189 Hi = DAG.getNode(ISD::ADD, dl, PtrVT, 1190 DAG.getNode(PPCISD::GlobalBaseReg, 1191 DebugLoc::getUnknownLoc(), PtrVT), Hi); 1192 } 1193 1194 Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo); 1195 return Lo; 1196} 1197 1198SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 1199 SelectionDAG &DAG) { 1200 llvm_unreachable("TLS not implemented for PPC."); 1201 return SDValue(); // Not reached 1202} 1203 1204SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 1205 SelectionDAG &DAG) { 1206 EVT PtrVT = Op.getValueType(); 1207 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 1208 GlobalValue *GV = GSDN->getGlobal(); 1209 SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); 1210 SDValue Zero = DAG.getConstant(0, PtrVT); 1211 // FIXME there isn't really any debug info here 1212 DebugLoc dl = GSDN->getDebugLoc(); 1213 1214 const TargetMachine &TM = DAG.getTarget(); 1215 1216 // 64-bit SVR4 ABI code is always position-independent. 1217 // The actual address of the GlobalValue is stored in the TOC. 1218 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1219 return DAG.getNode(PPCISD::TOC_ENTRY, dl, MVT::i64, GA, 1220 DAG.getRegister(PPC::X2, MVT::i64)); 1221 } 1222 1223 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, GA, Zero); 1224 SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, GA, Zero); 1225 1226 // If this is a non-darwin platform, we don't support non-static relo models 1227 // yet. 1228 if (TM.getRelocationModel() == Reloc::Static || 1229 !TM.getSubtarget<PPCSubtarget>().isDarwin()) { 1230 // Generate non-pic code that has direct accesses to globals. 1231 // The address of the global is just (hi(&g)+lo(&g)). 
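// Illustrative note (assuming Darwin assembler syntax): this Hi/Lo pair is
// emitted roughly as lis rT, ha16(_g) followed by an addi (or memory op) using
// lo16(_g)(rT); ha16 is the carry-adjusted high half so that adding the signed
// lo16 part yields the full 32-bit address.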
1232 return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo); 1233 } 1234 1235 if (TM.getRelocationModel() == Reloc::PIC_) { 1236 // With PIC, the first instruction is actually "GR+hi(&G)". 1237 Hi = DAG.getNode(ISD::ADD, dl, PtrVT, 1238 DAG.getNode(PPCISD::GlobalBaseReg, 1239 DebugLoc::getUnknownLoc(), PtrVT), Hi); 1240 } 1241 1242 Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo); 1243 1244 if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) 1245 return Lo; 1246 1247 // If the global is weak or external, we have to go through the lazy 1248 // resolution stub. 1249 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Lo, NULL, 0); 1250} 1251 1252SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) { 1253 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1254 DebugLoc dl = Op.getDebugLoc(); 1255 1256 // If we're comparing for equality to zero, expose the fact that this is 1257 // implented as a ctlz/srl pair on ppc, so that the dag combiner can 1258 // fold the new nodes. 1259 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1260 if (C->isNullValue() && CC == ISD::SETEQ) { 1261 EVT VT = Op.getOperand(0).getValueType(); 1262 SDValue Zext = Op.getOperand(0); 1263 if (VT.bitsLT(MVT::i32)) { 1264 VT = MVT::i32; 1265 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 1266 } 1267 unsigned Log2b = Log2_32(VT.getSizeInBits()); 1268 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 1269 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 1270 DAG.getConstant(Log2b, MVT::i32)); 1271 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 1272 } 1273 // Leave comparisons against 0 and -1 alone for now, since they're usually 1274 // optimized. FIXME: revisit this when we can custom lower all setcc 1275 // optimizations. 1276 if (C->isAllOnesValue() || C->isNullValue()) 1277 return SDValue(); 1278 } 1279 1280 // If we have an integer seteq/setne, turn it into a compare against zero 1281 // by xor'ing the rhs with the lhs, which is faster than setting a 1282 // condition register, reading it back out, and masking the correct bit. The 1283 // normal approach here uses sub to do this instead of xor. Using xor exposes 1284 // the result to other bit-twiddling opportunities. 
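// Illustrative example: (a == b) is rewritten below as setcc(a ^ b, 0, seteq);
// an equality test against zero is itself lowered above to
// cntlzw tmp, x ; srwi res, tmp, 5, since cntlzw returns 32 exactly when its
// operand is zero.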
1285 EVT LHSVT = Op.getOperand(0).getValueType(); 1286 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1287 EVT VT = Op.getValueType(); 1288 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 1289 Op.getOperand(1)); 1290 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC); 1291 } 1292 return SDValue(); 1293} 1294 1295SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 1296 int VarArgsFrameIndex, 1297 int VarArgsStackOffset, 1298 unsigned VarArgsNumGPR, 1299 unsigned VarArgsNumFPR, 1300 const PPCSubtarget &Subtarget) { 1301 1302 llvm_unreachable("VAARG not yet implemented for the SVR4 ABI!"); 1303 return SDValue(); // Not reached 1304} 1305 1306SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) { 1307 SDValue Chain = Op.getOperand(0); 1308 SDValue Trmp = Op.getOperand(1); // trampoline 1309 SDValue FPtr = Op.getOperand(2); // nested function 1310 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 1311 DebugLoc dl = Op.getDebugLoc(); 1312 1313 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1314 bool isPPC64 = (PtrVT == MVT::i64); 1315 const Type *IntPtrTy = 1316 DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType( 1317 *DAG.getContext()); 1318 1319 TargetLowering::ArgListTy Args; 1320 TargetLowering::ArgListEntry Entry; 1321 1322 Entry.Ty = IntPtrTy; 1323 Entry.Node = Trmp; Args.push_back(Entry); 1324 1325 // TrampSize == (isPPC64 ? 48 : 40); 1326 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, 1327 isPPC64 ? MVT::i64 : MVT::i32); 1328 Args.push_back(Entry); 1329 1330 Entry.Node = FPtr; Args.push_back(Entry); 1331 Entry.Node = Nest; Args.push_back(Entry); 1332 1333 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 1334 std::pair<SDValue, SDValue> CallResult = 1335 LowerCallTo(Chain, Op.getValueType().getTypeForEVT(*DAG.getContext()), 1336 false, false, false, false, 0, CallingConv::C, false, 1337 /*isReturnValueUsed=*/true, 1338 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 1339 Args, DAG, dl); 1340 1341 SDValue Ops[] = 1342 { CallResult.first, CallResult.second }; 1343 1344 return DAG.getMergeValues(Ops, 2, dl); 1345} 1346 1347SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 1348 int VarArgsFrameIndex, 1349 int VarArgsStackOffset, 1350 unsigned VarArgsNumGPR, 1351 unsigned VarArgsNumFPR, 1352 const PPCSubtarget &Subtarget) { 1353 DebugLoc dl = Op.getDebugLoc(); 1354 1355 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 1356 // vastart just stores the address of the VarArgsFrameIndex slot into the 1357 // memory location argument. 1358 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1359 SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1360 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1361 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0); 1362 } 1363 1364 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 1365 // We suppose the given va_list is already allocated. 1366 // 1367 // typedef struct { 1368 // char gpr; /* index into the array of 8 GPRs 1369 // * stored in the register save area 1370 // * gpr=0 corresponds to r3, 1371 // * gpr=1 to r4, etc. 1372 // */ 1373 // char fpr; /* index into the array of 8 FPRs 1374 // * stored in the register save area 1375 // * fpr=0 corresponds to f1, 1376 // * fpr=1 to f2, etc. 
1377 // */ 1378 // char *overflow_arg_area; 1379 // /* location on stack that holds 1380 // * the next overflow argument 1381 // */ 1382 // char *reg_save_area; 1383 // /* where r3:r10 and f1:f8 (if saved) 1384 // * are stored 1385 // */ 1386 // } va_list[1]; 1387 1388 1389 SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i32); 1390 SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i32); 1391 1392 1393 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1394 1395 SDValue StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT); 1396 SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1397 1398 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 1399 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 1400 1401 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 1402 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 1403 1404 uint64_t FPROffset = 1; 1405 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 1406 1407 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1408 1409 // Store first byte : number of int regs 1410 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 1411 Op.getOperand(1), SV, 0, MVT::i8); 1412 uint64_t nextOffset = FPROffset; 1413 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 1414 ConstFPROffset); 1415 1416 // Store second byte : number of float regs 1417 SDValue secondStore = 1418 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset, MVT::i8); 1419 nextOffset += StackOffset; 1420 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 1421 1422 // Store second word : arguments given on stack 1423 SDValue thirdStore = 1424 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, SV, nextOffset); 1425 nextOffset += FrameOffset; 1426 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 1427 1428 // Store third word : arguments given in registers 1429 return DAG.getStore(thirdStore, dl, FR, nextPtr, SV, nextOffset); 1430 1431} 1432 1433#include "PPCGenCallingConv.inc" 1434 1435static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, EVT &ValVT, EVT &LocVT, 1436 CCValAssign::LocInfo &LocInfo, 1437 ISD::ArgFlagsTy &ArgFlags, 1438 CCState &State) { 1439 return true; 1440} 1441 1442static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT, 1443 EVT &LocVT, 1444 CCValAssign::LocInfo &LocInfo, 1445 ISD::ArgFlagsTy &ArgFlags, 1446 CCState &State) { 1447 static const unsigned ArgRegs[] = { 1448 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1449 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1450 }; 1451 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1452 1453 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1454 1455 // Skip one register if the first unallocated register has an even register 1456 // number and there are still argument registers available which have not been 1457 // allocated yet. RegNum is actually an index into ArgRegs, which means we 1458 // need to skip a register if RegNum is odd. 1459 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 1460 State.AllocateReg(ArgRegs[RegNum]); 1461 } 1462 1463 // Always return false here, as this function only makes sure that the first 1464 // unallocated register has an odd register number and does not actually 1465 // allocate a register for the current argument. 
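// Illustrative example: after a single i32 argument has taken r3, an i64
// argument causes r4 to be skipped here so that its two halves land in the
// aligned pair r5/r6.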
1466 return false; 1467} 1468 1469static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT, 1470 EVT &LocVT, 1471 CCValAssign::LocInfo &LocInfo, 1472 ISD::ArgFlagsTy &ArgFlags, 1473 CCState &State) { 1474 static const unsigned ArgRegs[] = { 1475 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1476 PPC::F8 1477 }; 1478 1479 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1480 1481 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1482 1483 // If there is only one Floating-point register left we need to put both f64 1484 // values of a split ppc_fp128 value on the stack. 1485 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 1486 State.AllocateReg(ArgRegs[RegNum]); 1487 } 1488 1489 // Always return false here, as this function only makes sure that the two f64 1490 // values a ppc_fp128 value is split into are both passed in registers or both 1491 // passed on the stack and does not actually allocate a register for the 1492 // current argument. 1493 return false; 1494} 1495 1496/// GetFPR - Get the set of FP registers that should be allocated for arguments, 1497/// on Darwin. 1498static const unsigned *GetFPR() { 1499 static const unsigned FPR[] = { 1500 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1501 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1502 }; 1503 1504 return FPR; 1505} 1506 1507/// CalculateStackSlotSize - Calculates the size reserved for this argument on 1508/// the stack. 1509static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 1510 unsigned PtrByteSize) { 1511 unsigned ArgSize = ArgVT.getSizeInBits()/8; 1512 if (Flags.isByVal()) 1513 ArgSize = Flags.getByValSize(); 1514 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1515 1516 return ArgSize; 1517} 1518 1519SDValue 1520PPCTargetLowering::LowerFormalArguments(SDValue Chain, 1521 unsigned CallConv, bool isVarArg, 1522 const SmallVectorImpl<ISD::InputArg> 1523 &Ins, 1524 DebugLoc dl, SelectionDAG &DAG, 1525 SmallVectorImpl<SDValue> &InVals) { 1526 if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) { 1527 return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins, 1528 dl, DAG, InVals); 1529 } else { 1530 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 1531 dl, DAG, InVals); 1532 } 1533} 1534 1535SDValue 1536PPCTargetLowering::LowerFormalArguments_SVR4( 1537 SDValue Chain, 1538 unsigned CallConv, bool isVarArg, 1539 const SmallVectorImpl<ISD::InputArg> 1540 &Ins, 1541 DebugLoc dl, SelectionDAG &DAG, 1542 SmallVectorImpl<SDValue> &InVals) { 1543 1544 // 32-bit SVR4 ABI Stack Frame Layout: 1545 // +-----------------------------------+ 1546 // +--> | Back chain | 1547 // | +-----------------------------------+ 1548 // | | Floating-point register save area | 1549 // | +-----------------------------------+ 1550 // | | General register save area | 1551 // | +-----------------------------------+ 1552 // | | CR save word | 1553 // | +-----------------------------------+ 1554 // | | VRSAVE save word | 1555 // | +-----------------------------------+ 1556 // | | Alignment padding | 1557 // | +-----------------------------------+ 1558 // | | Vector register save area | 1559 // | +-----------------------------------+ 1560 // | | Local variable space | 1561 // | +-----------------------------------+ 1562 // | | Parameter list area | 1563 // | +-----------------------------------+ 1564 // | | LR save word | 1565 // | +-----------------------------------+ 1566 // SP--> +--- | Back chain | 
1567 // +-----------------------------------+ 1568 // 1569 // Specifications: 1570 // System V Application Binary Interface PowerPC Processor Supplement 1571 // AltiVec Technology Programming Interface Manual 1572 1573 MachineFunction &MF = DAG.getMachineFunction(); 1574 MachineFrameInfo *MFI = MF.getFrameInfo(); 1575 1576 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1577 // Potential tail calls could cause overwriting of argument stack slots. 1578 bool isImmutable = !(PerformTailCallOpt && (CallConv==CallingConv::Fast)); 1579 unsigned PtrByteSize = 4; 1580 1581 // Assign locations to all of the incoming arguments. 1582 SmallVector<CCValAssign, 16> ArgLocs; 1583 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 1584 *DAG.getContext()); 1585 1586 // Reserve space for the linkage area on the stack. 1587 CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize); 1588 1589 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4); 1590 1591 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1592 CCValAssign &VA = ArgLocs[i]; 1593 1594 // Arguments stored in registers. 1595 if (VA.isRegLoc()) { 1596 TargetRegisterClass *RC; 1597 EVT ValVT = VA.getValVT(); 1598 1599 switch (ValVT.getSimpleVT().SimpleTy) { 1600 default: 1601 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 1602 case MVT::i32: 1603 RC = PPC::GPRCRegisterClass; 1604 break; 1605 case MVT::f32: 1606 RC = PPC::F4RCRegisterClass; 1607 break; 1608 case MVT::f64: 1609 RC = PPC::F8RCRegisterClass; 1610 break; 1611 case MVT::v16i8: 1612 case MVT::v8i16: 1613 case MVT::v4i32: 1614 case MVT::v4f32: 1615 RC = PPC::VRRCRegisterClass; 1616 break; 1617 } 1618 1619 // Transform the arguments stored in physical registers into virtual ones. 1620 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1621 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT); 1622 1623 InVals.push_back(ArgValue); 1624 } else { 1625 // Argument stored in memory. 1626 assert(VA.isMemLoc()); 1627 1628 unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; 1629 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 1630 isImmutable); 1631 1632 // Create load nodes to retrieve arguments from the stack. 1633 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1634 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0)); 1635 } 1636 } 1637 1638 // Assign locations to all of the incoming aggregate by value arguments. 1639 // Aggregates passed by value are stored in the local variable space of the 1640 // caller's stack frame, right above the parameter list area. 1641 SmallVector<CCValAssign, 16> ByValArgLocs; 1642 CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), 1643 ByValArgLocs, *DAG.getContext()); 1644 1645 // Reserve stack space for the allocations in CCInfo. 1646 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 1647 1648 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4_ByVal); 1649 1650 // Area that is at least reserved in the caller of this function. 1651 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 1652 1653 // Set the size that is at least reserved in caller of this function. Tail 1654 // call optimized function's reserved stack space needs to be aligned so that 1655 // taking the difference between two stack areas will result in an aligned 1656 // stack. 
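  // The rounding performed below is the usual power-of-two align-up:
  //   aligned = (value + align - 1) & ~(align - 1)
  // e.g. with a 16-byte stack alignment a 52-byte area becomes
  // (52 + 15) & ~15 == 64.  (The concrete numbers are illustrative only.)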
1657 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1658 1659 MinReservedArea = 1660 std::max(MinReservedArea, 1661 PPCFrameInfo::getMinCallFrameSize(false, false)); 1662 1663 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()-> 1664 getStackAlignment(); 1665 unsigned AlignMask = TargetAlign-1; 1666 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 1667 1668 FI->setMinReservedArea(MinReservedArea); 1669 1670 SmallVector<SDValue, 8> MemOps; 1671 1672 // If the function takes variable number of arguments, make a frame index for 1673 // the start of the first vararg value... for expansion of llvm.va_start. 1674 if (isVarArg) { 1675 static const unsigned GPArgRegs[] = { 1676 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1677 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1678 }; 1679 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 1680 1681 static const unsigned FPArgRegs[] = { 1682 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1683 PPC::F8 1684 }; 1685 const unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 1686 1687 VarArgsNumGPR = CCInfo.getFirstUnallocated(GPArgRegs, NumGPArgRegs); 1688 VarArgsNumFPR = CCInfo.getFirstUnallocated(FPArgRegs, NumFPArgRegs); 1689 1690 // Make room for NumGPArgRegs and NumFPArgRegs. 1691 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 1692 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8; 1693 1694 VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 1695 CCInfo.getNextStackOffset()); 1696 1697 VarArgsFrameIndex = MFI->CreateStackObject(Depth, 8); 1698 SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 1699 1700 // The fixed integer arguments of a variadic function are 1701 // stored to the VarArgsFrameIndex on the stack. 1702 unsigned GPRIndex = 0; 1703 for (; GPRIndex != VarArgsNumGPR; ++GPRIndex) { 1704 SDValue Val = DAG.getRegister(GPArgRegs[GPRIndex], PtrVT); 1705 SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0); 1706 MemOps.push_back(Store); 1707 // Increment the address by four for the next argument to store 1708 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 1709 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1710 } 1711 1712 // If this function is vararg, store any remaining integer argument regs 1713 // to their spots on the stack so that they may be loaded by deferencing the 1714 // result of va_next. 1715 for (; GPRIndex != NumGPArgRegs; ++GPRIndex) { 1716 unsigned VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 1717 1718 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 1719 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); 1720 MemOps.push_back(Store); 1721 // Increment the address by four for the next argument to store 1722 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 1723 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1724 } 1725 1726 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 1727 // is set. 1728 1729 // The double arguments are stored to the VarArgsFrameIndex 1730 // on the stack. 
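    // At this point FIN has advanced past the eight 4-byte GPR slots, so the
    // f64 copies written below occupy the second half of the frame object:
    // bytes 0-31 hold the r3-r10 copies and bytes 32-95 hold the f1-f8 copies,
    // matching the Depth of 8*4 + 8*8 bytes computed above.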
1731 unsigned FPRIndex = 0; 1732 for (FPRIndex = 0; FPRIndex != VarArgsNumFPR; ++FPRIndex) { 1733 SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], MVT::f64); 1734 SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0); 1735 MemOps.push_back(Store); 1736 // Increment the address by eight for the next argument to store 1737 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8, 1738 PtrVT); 1739 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1740 } 1741 1742 for (; FPRIndex != NumFPArgRegs; ++FPRIndex) { 1743 unsigned VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 1744 1745 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 1746 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); 1747 MemOps.push_back(Store); 1748 // Increment the address by eight for the next argument to store 1749 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8, 1750 PtrVT); 1751 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1752 } 1753 } 1754 1755 if (!MemOps.empty()) 1756 Chain = DAG.getNode(ISD::TokenFactor, dl, 1757 MVT::Other, &MemOps[0], MemOps.size()); 1758 1759 return Chain; 1760} 1761 1762SDValue 1763PPCTargetLowering::LowerFormalArguments_Darwin( 1764 SDValue Chain, 1765 unsigned CallConv, bool isVarArg, 1766 const SmallVectorImpl<ISD::InputArg> 1767 &Ins, 1768 DebugLoc dl, SelectionDAG &DAG, 1769 SmallVectorImpl<SDValue> &InVals) { 1770 // TODO: add description of PPC stack frame format, or at least some docs. 1771 // 1772 MachineFunction &MF = DAG.getMachineFunction(); 1773 MachineFrameInfo *MFI = MF.getFrameInfo(); 1774 1775 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1776 bool isPPC64 = PtrVT == MVT::i64; 1777 // Potential tail calls could cause overwriting of argument stack slots. 1778 bool isImmutable = !(PerformTailCallOpt && (CallConv==CallingConv::Fast)); 1779 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1780 1781 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, true); 1782 // Area that is at least reserved in caller of this function. 1783 unsigned MinReservedArea = ArgOffset; 1784 1785 static const unsigned GPR_32[] = { // 32-bit registers. 1786 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1787 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1788 }; 1789 static const unsigned GPR_64[] = { // 64-bit registers. 1790 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1791 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1792 }; 1793 1794 static const unsigned *FPR = GetFPR(); 1795 1796 static const unsigned VR[] = { 1797 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1798 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1799 }; 1800 1801 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 1802 const unsigned Num_FPR_Regs = 13; 1803 const unsigned Num_VR_Regs = array_lengthof( VR); 1804 1805 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1806 1807 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1808 1809 // In 32-bit non-varargs functions, the stack space for vectors is after the 1810 // stack space for non-vectors. We do not use this space unless we have 1811 // too many vectors to fit in registers, something that only occurs in 1812 // constructed examples:), but we have to walk the arglist to figure 1813 // that out...for the pathological case, compute VecArgOffset as the 1814 // start of the vector parameter area. Computing VecArgOffset is the 1815 // entire point of the following loop. 
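  //
  // Hypothetical example of the computation below (32-bit Darwin, 24-byte
  // linkage area): for a non-varargs (i32, f64, <4 x i32>) signature the two
  // scalars contribute 4 + 8 bytes, giving 36; that is rounded up to 48 and
  // then bumped past the 12*16 bytes shadowing V2-V13, so the first in-memory
  // vector slot would start at byte offset 240.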
1816 unsigned VecArgOffset = ArgOffset; 1817 if (!isVarArg && !isPPC64) { 1818 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 1819 ++ArgNo) { 1820 EVT ObjectVT = Ins[ArgNo].VT; 1821 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 1822 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 1823 1824 if (Flags.isByVal()) { 1825 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 1826 ObjSize = Flags.getByValSize(); 1827 unsigned ArgSize = 1828 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1829 VecArgOffset += ArgSize; 1830 continue; 1831 } 1832 1833 switch(ObjectVT.getSimpleVT().SimpleTy) { 1834 default: llvm_unreachable("Unhandled argument type!"); 1835 case MVT::i32: 1836 case MVT::f32: 1837 VecArgOffset += isPPC64 ? 8 : 4; 1838 break; 1839 case MVT::i64: // PPC64 1840 case MVT::f64: 1841 VecArgOffset += 8; 1842 break; 1843 case MVT::v4f32: 1844 case MVT::v4i32: 1845 case MVT::v8i16: 1846 case MVT::v16i8: 1847 // Nothing to do, we're only looking at Nonvector args here. 1848 break; 1849 } 1850 } 1851 } 1852 // We've found where the vector parameter area in memory is. Skip the 1853 // first 12 parameters; these don't use that memory. 1854 VecArgOffset = ((VecArgOffset+15)/16)*16; 1855 VecArgOffset += 12*16; 1856 1857 // Add DAG nodes to load the arguments or copy them out of registers. On 1858 // entry to a function on PPC, the arguments start after the linkage area, 1859 // although the first ones are often in registers. 1860 1861 SmallVector<SDValue, 8> MemOps; 1862 unsigned nAltivecParamsAtEnd = 0; 1863 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 1864 SDValue ArgVal; 1865 bool needsLoad = false; 1866 EVT ObjectVT = Ins[ArgNo].VT; 1867 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 1868 unsigned ArgSize = ObjSize; 1869 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 1870 1871 unsigned CurArgOffset = ArgOffset; 1872 1873 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 1874 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 1875 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 1876 if (isVarArg || isPPC64) { 1877 MinReservedArea = ((MinReservedArea+15)/16)*16; 1878 MinReservedArea += CalculateStackSlotSize(ObjectVT, 1879 Flags, 1880 PtrByteSize); 1881 } else nAltivecParamsAtEnd++; 1882 } else 1883 // Calculate min reserved area. 1884 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 1885 Flags, 1886 PtrByteSize); 1887 1888 // FIXME the codegen can be much improved in some cases. 1889 // We do not have to keep everything in memory. 1890 if (Flags.isByVal()) { 1891 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 1892 ObjSize = Flags.getByValSize(); 1893 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1894 // Objects of size 1 and 2 are right justified, everything else is 1895 // left justified. This means the memory address is adjusted forwards. 1896 if (ObjSize==1 || ObjSize==2) { 1897 CurArgOffset = CurArgOffset + (4 - ObjSize); 1898 } 1899 // The value of the object is its address. 1900 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset); 1901 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1902 InVals.push_back(FIN); 1903 if (ObjSize==1 || ObjSize==2) { 1904 if (GPR_idx != Num_GPR_Regs) { 1905 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 1906 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 1907 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 1908 NULL, 0, ObjSize==1 ? 
MVT::i8 : MVT::i16 ); 1909 MemOps.push_back(Store); 1910 ++GPR_idx; 1911 } 1912 1913 ArgOffset += PtrByteSize; 1914 1915 continue; 1916 } 1917 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 1918 // Store whatever pieces of the object are in registers 1919 // to memory. ArgVal will be address of the beginning of 1920 // the object. 1921 if (GPR_idx != Num_GPR_Regs) { 1922 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 1923 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset); 1924 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1925 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 1926 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); 1927 MemOps.push_back(Store); 1928 ++GPR_idx; 1929 ArgOffset += PtrByteSize; 1930 } else { 1931 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 1932 break; 1933 } 1934 } 1935 continue; 1936 } 1937 1938 switch (ObjectVT.getSimpleVT().SimpleTy) { 1939 default: llvm_unreachable("Unhandled argument type!"); 1940 case MVT::i32: 1941 if (!isPPC64) { 1942 if (GPR_idx != Num_GPR_Regs) { 1943 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 1944 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 1945 ++GPR_idx; 1946 } else { 1947 needsLoad = true; 1948 ArgSize = PtrByteSize; 1949 } 1950 // All int arguments reserve stack space in the Darwin ABI. 1951 ArgOffset += PtrByteSize; 1952 break; 1953 } 1954 // FALLTHROUGH 1955 case MVT::i64: // PPC64 1956 if (GPR_idx != Num_GPR_Regs) { 1957 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 1958 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 1959 1960 if (ObjectVT == MVT::i32) { 1961 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 1962 // value to MVT::i64 and then truncate to the correct register size. 1963 if (Flags.isSExt()) 1964 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 1965 DAG.getValueType(ObjectVT)); 1966 else if (Flags.isZExt()) 1967 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 1968 DAG.getValueType(ObjectVT)); 1969 1970 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 1971 } 1972 1973 ++GPR_idx; 1974 } else { 1975 needsLoad = true; 1976 ArgSize = PtrByteSize; 1977 } 1978 // All int arguments reserve stack space in the Darwin ABI. 1979 ArgOffset += 8; 1980 break; 1981 1982 case MVT::f32: 1983 case MVT::f64: 1984 // Every 4 bytes of argument space consumes one of the GPRs available for 1985 // argument passing. 1986 if (GPR_idx != Num_GPR_Regs) { 1987 ++GPR_idx; 1988 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 1989 ++GPR_idx; 1990 } 1991 if (FPR_idx != Num_FPR_Regs) { 1992 unsigned VReg; 1993 1994 if (ObjectVT == MVT::f32) 1995 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 1996 else 1997 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 1998 1999 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2000 ++FPR_idx; 2001 } else { 2002 needsLoad = true; 2003 } 2004 2005 // All FP arguments reserve stack space in the Darwin ABI. 2006 ArgOffset += isPPC64 ? 8 : ObjSize; 2007 break; 2008 case MVT::v4f32: 2009 case MVT::v4i32: 2010 case MVT::v8i16: 2011 case MVT::v16i8: 2012 // Note that vector arguments in registers don't reserve stack space, 2013 // except in varargs functions. 
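      // For example, a prototyped <4 x i32> argument that lands in V2 consumes
      // none of the bytes counted in ArgOffset; only varargs vectors, or
      // vectors that spill once V2-V13 are exhausted, take a 16-byte aligned
      // slot as handled below.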
2014 if (VR_idx != Num_VR_Regs) { 2015 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2016 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2017 if (isVarArg) { 2018 while ((ArgOffset % 16) != 0) { 2019 ArgOffset += PtrByteSize; 2020 if (GPR_idx != Num_GPR_Regs) 2021 GPR_idx++; 2022 } 2023 ArgOffset += 16; 2024 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2025 } 2026 ++VR_idx; 2027 } else { 2028 if (!isVarArg && !isPPC64) { 2029 // Vectors go after all the nonvectors. 2030 CurArgOffset = VecArgOffset; 2031 VecArgOffset += 16; 2032 } else { 2033 // Vectors are aligned. 2034 ArgOffset = ((ArgOffset+15)/16)*16; 2035 CurArgOffset = ArgOffset; 2036 ArgOffset += 16; 2037 } 2038 needsLoad = true; 2039 } 2040 break; 2041 } 2042 2043 // We need to load the argument to a virtual register if we determined above 2044 // that we ran out of physical registers of the appropriate type. 2045 if (needsLoad) { 2046 int FI = MFI->CreateFixedObject(ObjSize, 2047 CurArgOffset + (ArgSize - ObjSize), 2048 isImmutable); 2049 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2050 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0); 2051 } 2052 2053 InVals.push_back(ArgVal); 2054 } 2055 2056 // Set the size that is at least reserved in caller of this function. Tail 2057 // call optimized function's reserved stack space needs to be aligned so that 2058 // taking the difference between two stack areas will result in an aligned 2059 // stack. 2060 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2061 // Add the Altivec parameters at the end, if needed. 2062 if (nAltivecParamsAtEnd) { 2063 MinReservedArea = ((MinReservedArea+15)/16)*16; 2064 MinReservedArea += 16*nAltivecParamsAtEnd; 2065 } 2066 MinReservedArea = 2067 std::max(MinReservedArea, 2068 PPCFrameInfo::getMinCallFrameSize(isPPC64, true)); 2069 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()-> 2070 getStackAlignment(); 2071 unsigned AlignMask = TargetAlign-1; 2072 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 2073 FI->setMinReservedArea(MinReservedArea); 2074 2075 // If the function takes variable number of arguments, make a frame index for 2076 // the start of the first vararg value... for expansion of llvm.va_start. 2077 if (isVarArg) { 2078 int Depth = ArgOffset; 2079 2080 VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2081 Depth); 2082 SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); 2083 2084 // If this function is vararg, store any remaining integer argument regs 2085 // to their spots on the stack so that they may be loaded by deferencing the 2086 // result of va_next. 
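    // Unlike the 32-bit SVR4 va_list handled earlier, the Darwin va_list is a
    // plain pointer (see the Darwin branch of LowerVASTART above), so spilling
    // the remaining GPRs contiguously at PtrByteSize steps is all that va_arg
    // needs in order to walk the argument area.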
2087 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2088 unsigned VReg; 2089 2090 if (isPPC64) 2091 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2092 else 2093 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2094 2095 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2096 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); 2097 MemOps.push_back(Store); 2098 // Increment the address by four for the next argument to store 2099 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2100 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2101 } 2102 } 2103 2104 if (!MemOps.empty()) 2105 Chain = DAG.getNode(ISD::TokenFactor, dl, 2106 MVT::Other, &MemOps[0], MemOps.size()); 2107 2108 return Chain; 2109} 2110 2111/// CalculateParameterAndLinkageAreaSize - Get the size of the paramter plus 2112/// linkage area for the Darwin ABI. 2113static unsigned 2114CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, 2115 bool isPPC64, 2116 bool isVarArg, 2117 unsigned CC, 2118 const SmallVectorImpl<ISD::OutputArg> 2119 &Outs, 2120 unsigned &nAltivecParamsAtEnd) { 2121 // Count how many bytes are to be pushed on the stack, including the linkage 2122 // area, and parameter passing area. We start with 24/48 bytes, which is 2123 // prereserved space for [SP][CR][LR][3 x unused]. 2124 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, true); 2125 unsigned NumOps = Outs.size(); 2126 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2127 2128 // Add up all the space actually used. 2129 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 2130 // they all go in registers, but we must reserve stack space for them for 2131 // possible use by the caller. In varargs or 64-bit calls, parameters are 2132 // assigned stack space in order, with padding so Altivec parameters are 2133 // 16-byte aligned. 2134 nAltivecParamsAtEnd = 0; 2135 for (unsigned i = 0; i != NumOps; ++i) { 2136 SDValue Arg = Outs[i].Val; 2137 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2138 EVT ArgVT = Arg.getValueType(); 2139 // Varargs Altivec parameters are padded to a 16 byte boundary. 2140 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 2141 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 2142 if (!isVarArg && !isPPC64) { 2143 // Non-varargs Altivec parameters go after all the non-Altivec 2144 // parameters; handle those later so we know how much padding we need. 2145 nAltivecParamsAtEnd++; 2146 continue; 2147 } 2148 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 2149 NumBytes = ((NumBytes+15)/16)*16; 2150 } 2151 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2152 } 2153 2154 // Allow for Altivec parameters at the end, if needed. 2155 if (nAltivecParamsAtEnd) { 2156 NumBytes = ((NumBytes+15)/16)*16; 2157 NumBytes += 16*nAltivecParamsAtEnd; 2158 } 2159 2160 // The prolog code of the callee may store up to 8 GPR argument registers to 2161 // the stack, allowing va_start to index over them in memory if its varargs. 2162 // Because we cannot tell if this is needed on the caller side, we have to 2163 // conservatively assume that it is needed. As such, make sure we have at 2164 // least enough stack space for the caller to store the 8 GPRs. 2165 NumBytes = std::max(NumBytes, 2166 PPCFrameInfo::getMinCallFrameSize(isPPC64, true)); 2167 2168 // Tail call needs the stack to be aligned. 
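  // Rounding NumBytes up to TargetAlign mirrors the rounding of
  // MinReservedArea in LowerFormalArguments_*, so the caller/callee difference
  // taken in CalculateTailCallSPDiff stays aligned; e.g. 72 bytes with a
  // 16-byte alignment becomes (72 + 15) & ~15 == 80.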
2169   if (CC==CallingConv::Fast && PerformTailCallOpt) {
2170     unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
2171       getStackAlignment();
2172     unsigned AlignMask = TargetAlign-1;
2173     NumBytes = (NumBytes + AlignMask) & ~AlignMask;
2174   }
2175
2176   return NumBytes;
2177 }
2178
2179 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
2180 /// adjusted to accommodate the arguments for the tail call.
2181 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall,
2182                                    unsigned ParamSize) {
2183
2184   if (!IsTailCall) return 0;
2185
2186   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
2187   unsigned CallerMinReservedArea = FI->getMinReservedArea();
2188   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
2189   // Remember only if the new adjustment is bigger.
2190   if (SPDiff < FI->getTailCallSPDelta())
2191     FI->setTailCallSPDelta(SPDiff);
2192
2193   return SPDiff;
2194 }
2195
2196 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2197 /// for tail call optimization. Targets which want to do tail call
2198 /// optimization should implement this function.
2199 bool
2200 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
2201                                                      unsigned CalleeCC,
2202                                                      bool isVarArg,
2203                                       const SmallVectorImpl<ISD::InputArg> &Ins,
2204                                                      SelectionDAG& DAG) const {
2205   // Variable argument functions are not supported.
2206   if (isVarArg)
2207     return false;
2208
2209   MachineFunction &MF = DAG.getMachineFunction();
2210   unsigned CallerCC = MF.getFunction()->getCallingConv();
2211   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
2212     // Functions containing byval parameters are not supported.
2213     for (unsigned i = 0; i != Ins.size(); i++) {
2214       ISD::ArgFlagsTy Flags = Ins[i].Flags;
2215       if (Flags.isByVal()) return false;
2216     }
2217
2218     // Non-PIC/GOT tail calls are supported.
2219     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
2220       return true;
2221
2222     // At the moment we can only do local tail calls (in same module, hidden
2223     // or protected) if we are generating PIC.
2224     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2225       return G->getGlobal()->hasHiddenVisibility()
2226           || G->getGlobal()->hasProtectedVisibility();
2227   }
2228
2229   return false;
2230 }
2231
2232 /// isBLACompatibleAddress - Return the immediate to use if the specified
2233 /// 32-bit value is representable in the immediate field of a BxA instruction.
2234 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
2235   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2236   if (!C) return 0;
2237
2238   int Addr = C->getZExtValue();
2239   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
2240       (Addr << 6 >> 6) != Addr)
2241     return 0;  // Top 6 bits have to be sext of immediate.
2242
2243   return DAG.getConstant((int)C->getZExtValue() >> 2,
2244                          DAG.getTargetLoweringInfo().getPointerTy()).getNode();
2245 }
2246
2247 namespace {
2248
2249 struct TailCallArgumentInfo {
2250   SDValue Arg;
2251   SDValue FrameIdxOp;
2252   int FrameIdx;
2253
2254   TailCallArgumentInfo() : FrameIdx(0) {}
2255 };
2256
2257 }
2258
2259 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
2260static void 2261StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 2262 SDValue Chain, 2263 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs, 2264 SmallVector<SDValue, 8> &MemOpChains, 2265 DebugLoc dl) { 2266 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 2267 SDValue Arg = TailCallArgs[i].Arg; 2268 SDValue FIN = TailCallArgs[i].FrameIdxOp; 2269 int FI = TailCallArgs[i].FrameIdx; 2270 // Store relative to framepointer. 2271 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN, 2272 PseudoSourceValue::getFixedStack(FI), 2273 0)); 2274 } 2275} 2276 2277/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 2278/// the appropriate stack slot for the tail call optimized function call. 2279static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 2280 MachineFunction &MF, 2281 SDValue Chain, 2282 SDValue OldRetAddr, 2283 SDValue OldFP, 2284 int SPDiff, 2285 bool isPPC64, 2286 bool isDarwinABI, 2287 DebugLoc dl) { 2288 if (SPDiff) { 2289 // Calculate the new stack slot for the return address. 2290 int SlotSize = isPPC64 ? 8 : 4; 2291 int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64, 2292 isDarwinABI); 2293 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 2294 NewRetAddrLoc); 2295 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2296 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 2297 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 2298 PseudoSourceValue::getFixedStack(NewRetAddr), 0); 2299 2300 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 2301 // slot as the FP is never overwritten. 2302 if (isDarwinABI) { 2303 int NewFPLoc = 2304 SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64, isDarwinABI); 2305 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc); 2306 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 2307 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 2308 PseudoSourceValue::getFixedStack(NewFPIdx), 0); 2309 } 2310 } 2311 return Chain; 2312} 2313 2314/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 2315/// the position of the argument. 2316static void 2317CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 2318 SDValue Arg, int SPDiff, unsigned ArgOffset, 2319 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { 2320 int Offset = ArgOffset + SPDiff; 2321 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 2322 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); 2323 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2324 SDValue FIN = DAG.getFrameIndex(FI, VT); 2325 TailCallArgumentInfo Info; 2326 Info.Arg = Arg; 2327 Info.FrameIdxOp = FIN; 2328 Info.FrameIdx = FI; 2329 TailCallArguments.push_back(Info); 2330} 2331 2332/// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 2333/// stack slot. Returns the chain as result and the loaded frame pointers in 2334/// LROpOut/FPOpout. Used when tail calling. 2335SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 2336 int SPDiff, 2337 SDValue Chain, 2338 SDValue &LROpOut, 2339 SDValue &FPOpOut, 2340 bool isDarwinABI, 2341 DebugLoc dl) { 2342 if (SPDiff) { 2343 // Load the LR and FP stack slot for later adjusting. 2344 EVT VT = PPCSubTarget.isPPC64() ? 
MVT::i64 : MVT::i32; 2345 LROpOut = getReturnAddrFrameIndex(DAG); 2346 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, NULL, 0); 2347 Chain = SDValue(LROpOut.getNode(), 1); 2348 2349 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 2350 // slot as the FP is never overwritten. 2351 if (isDarwinABI) { 2352 FPOpOut = getFramePointerFrameIndex(DAG); 2353 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, NULL, 0); 2354 Chain = SDValue(FPOpOut.getNode(), 1); 2355 } 2356 } 2357 return Chain; 2358} 2359 2360/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 2361/// by "Src" to address "Dst" of size "Size". Alignment information is 2362/// specified by the specific parameter attribute. The copy will be passed as 2363/// a byval function parameter. 2364/// Sometimes what we are copying is the end of a larger object, the part that 2365/// does not fit in registers. 2366static SDValue 2367CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 2368 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 2369 DebugLoc dl) { 2370 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 2371 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 2372 false, NULL, 0, NULL, 0); 2373} 2374 2375/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 2376/// tail calls. 2377static void 2378LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 2379 SDValue Arg, SDValue PtrOff, int SPDiff, 2380 unsigned ArgOffset, bool isPPC64, bool isTailCall, 2381 bool isVector, SmallVector<SDValue, 8> &MemOpChains, 2382 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments, 2383 DebugLoc dl) { 2384 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2385 if (!isTailCall) { 2386 if (isVector) { 2387 SDValue StackPtr; 2388 if (isPPC64) 2389 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 2390 else 2391 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2392 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 2393 DAG.getConstant(ArgOffset, PtrVT)); 2394 } 2395 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0)); 2396 // Calculate and remember argument location. 2397 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 2398 TailCallArguments); 2399} 2400 2401static 2402void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 2403 DebugLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 2404 SDValue LROp, SDValue FPOp, bool isDarwinABI, 2405 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) { 2406 MachineFunction &MF = DAG.getMachineFunction(); 2407 2408 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 2409 // might overwrite each other in case of tail call optimization. 2410 SmallVector<SDValue, 8> MemOpChains2; 2411 // Do not flag preceeding copytoreg stuff together with the following stuff. 2412 InFlag = SDValue(); 2413 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 2414 MemOpChains2, dl); 2415 if (!MemOpChains2.empty()) 2416 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2417 &MemOpChains2[0], MemOpChains2.size()); 2418 2419 // Store the return address to the appropriate stack slot. 2420 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 2421 isPPC64, isDarwinABI, dl); 2422 2423 // Emit callseq_end just before tailcall node. 
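  // Note that the callee-pop operand is zero here; the amount by which the
  // stack is later readjusted (SPDiff) is carried separately as an operand of
  // the TC_RETURN node built in PrepareCall/FinishCall.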
2424 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2425 DAG.getIntPtrConstant(0, true), InFlag); 2426 InFlag = Chain.getValue(1); 2427} 2428 2429static 2430unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 2431 SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall, 2432 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, 2433 SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, 2434 bool isSVR4ABI) { 2435 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2436 NodeTys.push_back(MVT::Other); // Returns a chain 2437 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 2438 2439 unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin; 2440 2441 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 2442 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 2443 // node so that legalize doesn't hack it. 2444 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2445 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType()); 2446 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 2447 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); 2448 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) 2449 // If this is an absolute destination address, use the munged value. 2450 Callee = SDValue(Dest, 0); 2451 else { 2452 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 2453 // to do the call, we can't use PPCISD::CALL. 2454 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 2455 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 2456 2 + (InFlag.getNode() != 0)); 2457 InFlag = Chain.getValue(1); 2458 2459 NodeTys.clear(); 2460 NodeTys.push_back(MVT::Other); 2461 NodeTys.push_back(MVT::Flag); 2462 Ops.push_back(Chain); 2463 CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin; 2464 Callee.setNode(0); 2465 // Add CTR register as callee so a bctr can be emitted later. 2466 if (isTailCall) 2467 Ops.push_back(DAG.getRegister(PPC::CTR, PtrVT)); 2468 } 2469 2470 // If this is a direct call, pass the chain and the callee. 2471 if (Callee.getNode()) { 2472 Ops.push_back(Chain); 2473 Ops.push_back(Callee); 2474 } 2475 // If this is a tail call add stack pointer delta. 2476 if (isTailCall) 2477 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 2478 2479 // Add argument registers to the end of the list so that they are known live 2480 // into the call. 2481 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2482 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2483 RegsToPass[i].second.getValueType())); 2484 2485 return CallOpc; 2486} 2487 2488SDValue 2489PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 2490 unsigned CallConv, bool isVarArg, 2491 const SmallVectorImpl<ISD::InputArg> &Ins, 2492 DebugLoc dl, SelectionDAG &DAG, 2493 SmallVectorImpl<SDValue> &InVals) { 2494 2495 SmallVector<CCValAssign, 16> RVLocs; 2496 CCState CCRetInfo(CallConv, isVarArg, getTargetMachine(), 2497 RVLocs, *DAG.getContext()); 2498 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 2499 2500 // Copy all of the result registers out of their specified physreg. 
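  // Each CopyFromReg below produces (value, chain, glue); threading the glue
  // result into the next copy keeps the copies pinned immediately after the
  // call so nothing can clobber the return-value registers in between.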
2501 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2502 CCValAssign &VA = RVLocs[i]; 2503 EVT VT = VA.getValVT(); 2504 assert(VA.isRegLoc() && "Can only return in registers!"); 2505 Chain = DAG.getCopyFromReg(Chain, dl, 2506 VA.getLocReg(), VT, InFlag).getValue(1); 2507 InVals.push_back(Chain.getValue(0)); 2508 InFlag = Chain.getValue(2); 2509 } 2510 2511 return Chain; 2512} 2513 2514SDValue 2515PPCTargetLowering::FinishCall(unsigned CallConv, DebugLoc dl, bool isTailCall, 2516 bool isVarArg, 2517 SelectionDAG &DAG, 2518 SmallVector<std::pair<unsigned, SDValue>, 8> 2519 &RegsToPass, 2520 SDValue InFlag, SDValue Chain, 2521 SDValue &Callee, 2522 int SPDiff, unsigned NumBytes, 2523 const SmallVectorImpl<ISD::InputArg> &Ins, 2524 SmallVectorImpl<SDValue> &InVals) { 2525 std::vector<EVT> NodeTys; 2526 SmallVector<SDValue, 8> Ops; 2527 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 2528 isTailCall, RegsToPass, Ops, NodeTys, 2529 PPCSubTarget.isSVR4ABI()); 2530 2531 // When performing tail call optimization the callee pops its arguments off 2532 // the stack. Account for this here so these bytes can be pushed back on in 2533 // PPCRegisterInfo::eliminateCallFramePseudoInstr. 2534 int BytesCalleePops = 2535 (CallConv==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0; 2536 2537 if (InFlag.getNode()) 2538 Ops.push_back(InFlag); 2539 2540 // Emit tail call. 2541 if (isTailCall) { 2542 // If this is the first return lowered for this function, add the regs 2543 // to the liveout set for the function. 2544 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 2545 SmallVector<CCValAssign, 16> RVLocs; 2546 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 2547 *DAG.getContext()); 2548 CCInfo.AnalyzeCallResult(Ins, RetCC_PPC); 2549 for (unsigned i = 0; i != RVLocs.size(); ++i) 2550 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 2551 } 2552 2553 assert(((Callee.getOpcode() == ISD::Register && 2554 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 2555 Callee.getOpcode() == ISD::TargetExternalSymbol || 2556 Callee.getOpcode() == ISD::TargetGlobalAddress || 2557 isa<ConstantSDNode>(Callee)) && 2558 "Expecting an global address, external symbol, absolute value or register"); 2559 2560 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size()); 2561 } 2562 2563 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 2564 InFlag = Chain.getValue(1); 2565 2566 // Add a NOP immediately after the branch instruction when using the 64-bit 2567 // SVR4 ABI. At link time, if caller and callee are in a different module and 2568 // thus have a different TOC, the call will be replaced with a call to a stub 2569 // function which saves the current TOC, loads the TOC of the callee and 2570 // branches to the callee. The NOP will be replaced with a load instruction 2571 // which restores the TOC of the caller from the TOC save slot of the current 2572 // stack frame. If caller and callee belong to the same module (and have the 2573 // same TOC), the NOP will remain unchanged. 2574 if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) { 2575 // Insert NOP. 
2576     InFlag = DAG.getNode(PPCISD::NOP, dl, MVT::Flag, InFlag);
2577   }
2578
2579   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2580                              DAG.getIntPtrConstant(BytesCalleePops, true),
2581                              InFlag);
2582   if (!Ins.empty())
2583     InFlag = Chain.getValue(1);
2584
2585   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2586                          Ins, dl, DAG, InVals);
2587 }
2588
2589 SDValue
2590 PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
2591                              unsigned CallConv, bool isVarArg,
2592                              bool isTailCall,
2593                              const SmallVectorImpl<ISD::OutputArg> &Outs,
2594                              const SmallVectorImpl<ISD::InputArg> &Ins,
2595                              DebugLoc dl, SelectionDAG &DAG,
2596                              SmallVectorImpl<SDValue> &InVals) {
2597   if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
2598     return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
2599                           isTailCall, Outs, Ins,
2600                           dl, DAG, InVals);
2601   } else {
2602     return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
2603                             isTailCall, Outs, Ins,
2604                             dl, DAG, InVals);
2605   }
2606 }
2607
2608 SDValue
2609 PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
2610                                   unsigned CallConv, bool isVarArg,
2611                                   bool isTailCall,
2612                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
2613                                   const SmallVectorImpl<ISD::InputArg> &Ins,
2614                                   DebugLoc dl, SelectionDAG &DAG,
2615                                   SmallVectorImpl<SDValue> &InVals) {
2616   // See PPCTargetLowering::LowerFormalArguments_SVR4() for a description
2617   // of the 32-bit SVR4 ABI stack frame layout.
2618
2619   assert((!isTailCall ||
2620           (CallConv == CallingConv::Fast && PerformTailCallOpt)) &&
2621          "IsEligibleForTailCallOptimization missed a case!");
2622
2623   assert((CallConv == CallingConv::C ||
2624           CallConv == CallingConv::Fast) && "Unknown calling convention!");
2625
2626   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2627   unsigned PtrByteSize = 4;
2628
2629   MachineFunction &MF = DAG.getMachineFunction();
2630
2631   // Mark this function as potentially containing a function that contains a
2632   // tail call. As a consequence the frame pointer will be used for dynamic
2633   // stack allocation and for restoring the caller's stack pointer in this
2634   // function's epilogue. This is done because a tail-called function might
2635   // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
2636   if (PerformTailCallOpt && CallConv==CallingConv::Fast)
2637     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2638
2639   // Count how many bytes are to be pushed on the stack, including the linkage
2640   // area, parameter list area and the part of the local variable space which
2641   // contains copies of aggregates which are passed by value.
2642
2643   // Assign locations to all of the outgoing arguments.
2644   SmallVector<CCValAssign, 16> ArgLocs;
2645   CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
2646                  ArgLocs, *DAG.getContext());
2647
2648   // Reserve space for the linkage area on the stack.
2649   CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
2650
2651   if (isVarArg) {
2652     // Handle fixed and variable vector arguments differently.
2653     // Fixed vector arguments go into registers as long as registers are
2654     // available. Variable vector arguments always go into memory.
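    // For example, given a hypothetical prototype
    //   void f(int, vector int, ...);
    // the named vector operand is classified with CC_PPC_SVR4 and may be
    // assigned V2, while a vector passed through the ellipsis is classified
    // with CC_PPC_SVR4_VarArg and always receives an overflow-area slot.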
2655 unsigned NumArgs = Outs.size(); 2656 2657 for (unsigned i = 0; i != NumArgs; ++i) { 2658 EVT ArgVT = Outs[i].Val.getValueType(); 2659 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 2660 bool Result; 2661 2662 if (Outs[i].IsFixed) { 2663 Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 2664 CCInfo); 2665 } else { 2666 Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 2667 ArgFlags, CCInfo); 2668 } 2669 2670 if (Result) { 2671#ifndef NDEBUG 2672 errs() << "Call operand #" << i << " has unhandled type " 2673 << ArgVT.getEVTString() << "\n"; 2674#endif 2675 llvm_unreachable(0); 2676 } 2677 } 2678 } else { 2679 // All arguments are treated the same. 2680 CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4); 2681 } 2682 2683 // Assign locations to all of the outgoing aggregate by value arguments. 2684 SmallVector<CCValAssign, 16> ByValArgLocs; 2685 CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), ByValArgLocs, 2686 *DAG.getContext()); 2687 2688 // Reserve stack space for the allocations in CCInfo. 2689 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2690 2691 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4_ByVal); 2692 2693 // Size of the linkage area, parameter list area and the part of the local 2694 // space variable where copies of aggregates which are passed by value are 2695 // stored. 2696 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 2697 2698 // Calculate by how many bytes the stack has to be adjusted in case of tail 2699 // call optimization. 2700 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 2701 2702 // Adjust the stack pointer for the new arguments... 2703 // These operations are automatically eliminated by the prolog/epilog pass 2704 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 2705 SDValue CallSeqStart = Chain; 2706 2707 // Load the return address and frame pointer so it can be moved somewhere else 2708 // later. 2709 SDValue LROp, FPOp; 2710 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 2711 dl); 2712 2713 // Set up a copy of the stack pointer for use loading and storing any 2714 // arguments that may not fit in the registers available for argument 2715 // passing. 2716 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2717 2718 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2719 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 2720 SmallVector<SDValue, 8> MemOpChains; 2721 2722 // Walk the register/memloc assignments, inserting copies/loads. 2723 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 2724 i != e; 2725 ++i) { 2726 CCValAssign &VA = ArgLocs[i]; 2727 SDValue Arg = Outs[i].Val; 2728 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2729 2730 if (Flags.isByVal()) { 2731 // Argument is an aggregate which is passed by value, thus we need to 2732 // create a copy of it in the local variable space of the current stack 2733 // frame (which is the stack frame of the caller) and pass the address of 2734 // this copy to the callee. 2735 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 2736 CCValAssign &ByValVA = ByValArgLocs[j++]; 2737 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 2738 2739 // Memory reserved in the local variable space of the callers stack frame. 
2740 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 2741 2742 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2743 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2744 2745 // Create a copy of the argument in the local area of the current 2746 // stack frame. 2747 SDValue MemcpyCall = 2748 CreateCopyOfByValArgument(Arg, PtrOff, 2749 CallSeqStart.getNode()->getOperand(0), 2750 Flags, DAG, dl); 2751 2752 // This must go outside the CALLSEQ_START..END. 2753 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 2754 CallSeqStart.getNode()->getOperand(1)); 2755 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 2756 NewCallSeqStart.getNode()); 2757 Chain = CallSeqStart = NewCallSeqStart; 2758 2759 // Pass the address of the aggregate copy on the stack either in a 2760 // physical register or in the parameter list area of the current stack 2761 // frame to the callee. 2762 Arg = PtrOff; 2763 } 2764 2765 if (VA.isRegLoc()) { 2766 // Put argument in a physical register. 2767 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2768 } else { 2769 // Put argument in the parameter list area of the current stack frame. 2770 assert(VA.isMemLoc()); 2771 unsigned LocMemOffset = VA.getLocMemOffset(); 2772 2773 if (!isTailCall) { 2774 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2775 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2776 2777 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 2778 PseudoSourceValue::getStack(), LocMemOffset)); 2779 } else { 2780 // Calculate and remember argument location. 2781 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 2782 TailCallArguments); 2783 } 2784 } 2785 } 2786 2787 if (!MemOpChains.empty()) 2788 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2789 &MemOpChains[0], MemOpChains.size()); 2790 2791 // Build a sequence of copy-to-reg nodes chained together with token chain 2792 // and flag operands which copy the outgoing args into the appropriate regs. 2793 SDValue InFlag; 2794 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2795 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2796 RegsToPass[i].second, InFlag); 2797 InFlag = Chain.getValue(1); 2798 } 2799 2800 // Set CR6 to true if this is a vararg call. 2801 if (isVarArg) { 2802 SDValue SetCR(DAG.getTargetNode(PPC::CRSET, dl, MVT::i32), 0); 2803 Chain = DAG.getCopyToReg(Chain, dl, PPC::CR1EQ, SetCR, InFlag); 2804 InFlag = Chain.getValue(1); 2805 } 2806 2807 if (isTailCall) { 2808 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp, 2809 false, TailCallArguments); 2810 } 2811 2812 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 2813 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 2814 Ins, InVals); 2815} 2816 2817SDValue 2818PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, 2819 unsigned CallConv, bool isVarArg, 2820 bool isTailCall, 2821 const SmallVectorImpl<ISD::OutputArg> &Outs, 2822 const SmallVectorImpl<ISD::InputArg> &Ins, 2823 DebugLoc dl, SelectionDAG &DAG, 2824 SmallVectorImpl<SDValue> &InVals) { 2825 2826 unsigned NumOps = Outs.size(); 2827 2828 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2829 bool isPPC64 = PtrVT == MVT::i64; 2830 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2831 2832 MachineFunction &MF = DAG.getMachineFunction(); 2833 2834 // Mark this function as potentially containing a function that contains a 2835 // tail call. 
As a consequence the frame pointer will be used for dynamic stack
2836   // allocation and for restoring the caller's stack pointer in this function's
2837   // epilogue. This is done because a tail-called function might overwrite the
2838   // value in this function's (MF) stack pointer stack slot 0(SP).
2839   if (PerformTailCallOpt && CallConv==CallingConv::Fast)
2840     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2841
2842   unsigned nAltivecParamsAtEnd = 0;
2843
2844   // Count how many bytes are to be pushed on the stack, including the linkage
2845   // area, and parameter passing area. We start with 24/48 bytes, which is
2846   // prereserved space for [SP][CR][LR][3 x unused].
2847   unsigned NumBytes =
2848     CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
2849                                          Outs,
2850                                          nAltivecParamsAtEnd);
2851
2852   // Calculate by how many bytes the stack has to be adjusted in case of tail
2853   // call optimization.
2854   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
2855
2856   // To protect arguments on the stack from being clobbered in a tail call,
2857   // force all the loads to happen before doing any other lowering.
2858   if (isTailCall)
2859     Chain = DAG.getStackArgumentTokenFactor(Chain);
2860
2861   // Adjust the stack pointer for the new arguments...
2862   // These operations are automatically eliminated by the prolog/epilog pass
2863   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2864   SDValue CallSeqStart = Chain;
2865
2866   // Load the return address and frame pointer so they can be moved somewhere
2867   // else later.
2868   SDValue LROp, FPOp;
2869   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
2870                                        dl);
2871
2872   // Set up a copy of the stack pointer for use loading and storing any
2873   // arguments that may not fit in the registers available for argument
2874   // passing.
2875   SDValue StackPtr;
2876   if (isPPC64)
2877     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2878   else
2879     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2880
2881   // Figure out which arguments are going to go in registers, and which in
2882   // memory. Also, if this is a vararg function, floating point arguments
2883   // must be stored to our stack, and loaded into integer regs as well, if
2884   // any integer regs are available for argument passing.
2885   unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, true);
2886   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
2887
2888   static const unsigned GPR_32[] = { // 32-bit registers.
2889     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2890     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2891   };
2892   static const unsigned GPR_64[] = { // 64-bit registers.
2893     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
2894     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
2895   };
2896   static const unsigned *FPR = GetFPR();
2897
2898   static const unsigned VR[] = {
2899     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
2900     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
2901   };
2902   const unsigned NumGPRs = array_lengthof(GPR_32);
2903   const unsigned NumFPRs = 13;
2904   const unsigned NumVRs = array_lengthof(VR);
2905
2906   const unsigned *GPR = isPPC64 ?
GPR_64 : GPR_32; 2907 2908 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2909 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 2910 2911 SmallVector<SDValue, 8> MemOpChains; 2912 for (unsigned i = 0; i != NumOps; ++i) { 2913 bool inMem = false; 2914 SDValue Arg = Outs[i].Val; 2915 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2916 2917 // PtrOff will be used to store the current argument to the stack if a 2918 // register cannot be found for it. 2919 SDValue PtrOff; 2920 2921 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 2922 2923 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 2924 2925 // On PPC64, promote integers to 64-bit values. 2926 if (isPPC64 && Arg.getValueType() == MVT::i32) { 2927 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 2928 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 2929 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 2930 } 2931 2932 // FIXME memcpy is used way more than necessary. Correctness first. 2933 if (Flags.isByVal()) { 2934 unsigned Size = Flags.getByValSize(); 2935 if (Size==1 || Size==2) { 2936 // Very small objects are passed right-justified. 2937 // Everything else is passed left-justified. 2938 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 2939 if (GPR_idx != NumGPRs) { 2940 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 2941 NULL, 0, VT); 2942 MemOpChains.push_back(Load.getValue(1)); 2943 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 2944 2945 ArgOffset += PtrByteSize; 2946 } else { 2947 SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); 2948 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 2949 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, 2950 CallSeqStart.getNode()->getOperand(0), 2951 Flags, DAG, dl); 2952 // This must go outside the CALLSEQ_START..END. 2953 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 2954 CallSeqStart.getNode()->getOperand(1)); 2955 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 2956 NewCallSeqStart.getNode()); 2957 Chain = CallSeqStart = NewCallSeqStart; 2958 ArgOffset += PtrByteSize; 2959 } 2960 continue; 2961 } 2962 // Copy entire object into memory. There are cases where gcc-generated 2963 // code assumes it is there, even if it could be put entirely into 2964 // registers. (This is not what the doc says.) 2965 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 2966 CallSeqStart.getNode()->getOperand(0), 2967 Flags, DAG, dl); 2968 // This must go outside the CALLSEQ_START..END. 2969 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 2970 CallSeqStart.getNode()->getOperand(1)); 2971 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode()); 2972 Chain = CallSeqStart = NewCallSeqStart; 2973 // And copy the pieces of it that fit into registers. 
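    // E.g. for a hypothetical 12-byte struct on 32-bit Darwin: the whole
    // object has already been memcpy'd to its parameter-area slot above, and
    // the loop below additionally loads up to three words of it into
    // consecutive GPRs, mirroring what LowerFormalArguments_Darwin expects
    // for byval arguments on the callee side.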
2974 for (unsigned j=0; j<Size; j+=PtrByteSize) { 2975 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 2976 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 2977 if (GPR_idx != NumGPRs) { 2978 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, NULL, 0); 2979 MemOpChains.push_back(Load.getValue(1)); 2980 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 2981 ArgOffset += PtrByteSize; 2982 } else { 2983 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 2984 break; 2985 } 2986 } 2987 continue; 2988 } 2989 2990 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 2991 default: llvm_unreachable("Unexpected ValueType for argument!"); 2992 case MVT::i32: 2993 case MVT::i64: 2994 if (GPR_idx != NumGPRs) { 2995 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 2996 } else { 2997 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 2998 isPPC64, isTailCall, false, MemOpChains, 2999 TailCallArguments, dl); 3000 inMem = true; 3001 } 3002 ArgOffset += PtrByteSize; 3003 break; 3004 case MVT::f32: 3005 case MVT::f64: 3006 if (FPR_idx != NumFPRs) { 3007 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 3008 3009 if (isVarArg) { 3010 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0); 3011 MemOpChains.push_back(Store); 3012 3013 // Float varargs are always shadowed in available integer registers 3014 if (GPR_idx != NumGPRs) { 3015 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0); 3016 MemOpChains.push_back(Load.getValue(1)); 3017 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3018 } 3019 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 3020 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3021 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3022 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0); 3023 MemOpChains.push_back(Load.getValue(1)); 3024 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3025 } 3026 } else { 3027 // If we have any FPRs remaining, we may also have GPRs remaining. 3028 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 3029 // GPRs. 3030 if (GPR_idx != NumGPRs) 3031 ++GPR_idx; 3032 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 3033 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 3034 ++GPR_idx; 3035 } 3036 } else { 3037 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3038 isPPC64, isTailCall, false, MemOpChains, 3039 TailCallArguments, dl); 3040 inMem = true; 3041 } 3042 if (isPPC64) 3043 ArgOffset += 8; 3044 else 3045 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 3046 break; 3047 case MVT::v4f32: 3048 case MVT::v4i32: 3049 case MVT::v8i16: 3050 case MVT::v16i8: 3051 if (isVarArg) { 3052 // These go aligned on the stack, or in the corresponding R registers 3053 // when within range. The Darwin PPC ABI doc claims they also go in 3054 // V registers; in fact gcc does this only for arguments that are 3055 // prototyped, not for those that match the ... We do it for all 3056 // arguments, seems to work. 3057 while (ArgOffset % 16 !=0) { 3058 ArgOffset += PtrByteSize; 3059 if (GPR_idx != NumGPRs) 3060 GPR_idx++; 3061 } 3062 // We could elide this store in the case where the object fits 3063 // entirely in R registers. Maybe later. 
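  // What follows is, in effect (32-bit sketch): store the vector to its
  // 16-byte-aligned slot, reload it into the next free VR if there is one,
  // and also reload it word by word into up to four GPRs, so a vararg callee
  // can find the value both in registers and at its aligned stack offset.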
3064 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3065 DAG.getConstant(ArgOffset, PtrVT)); 3066 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0); 3067 MemOpChains.push_back(Store); 3068 if (VR_idx != NumVRs) { 3069 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, NULL, 0); 3070 MemOpChains.push_back(Load.getValue(1)); 3071 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 3072 } 3073 ArgOffset += 16; 3074 for (unsigned i=0; i<16; i+=PtrByteSize) { 3075 if (GPR_idx == NumGPRs) 3076 break; 3077 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 3078 DAG.getConstant(i, PtrVT)); 3079 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, NULL, 0); 3080 MemOpChains.push_back(Load.getValue(1)); 3081 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3082 } 3083 break; 3084 } 3085 3086 // Non-varargs Altivec params generally go in registers, but have 3087 // stack space allocated at the end. 3088 if (VR_idx != NumVRs) { 3089 // Doesn't have GPR space allocated. 3090 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 3091 } else if (nAltivecParamsAtEnd==0) { 3092 // We are emitting Altivec params in order. 3093 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3094 isPPC64, isTailCall, true, MemOpChains, 3095 TailCallArguments, dl); 3096 ArgOffset += 16; 3097 } 3098 break; 3099 } 3100 } 3101 // If all Altivec parameters fit in registers, as they usually do, 3102 // they get stack space following the non-Altivec parameters. We 3103 // don't track this here because nobody below needs it. 3104 // If there are more Altivec parameters than fit in registers emit 3105 // the stores here. 3106 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 3107 unsigned j = 0; 3108 // Offset is aligned; skip 1st 12 params which go in V registers. 3109 ArgOffset = ((ArgOffset+15)/16)*16; 3110 ArgOffset += 12*16; 3111 for (unsigned i = 0; i != NumOps; ++i) { 3112 SDValue Arg = Outs[i].Val; 3113 EVT ArgType = Arg.getValueType(); 3114 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 3115 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 3116 if (++j > NumVRs) { 3117 SDValue PtrOff; 3118 // We are emitting Altivec params in order. 3119 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3120 isPPC64, isTailCall, true, MemOpChains, 3121 TailCallArguments, dl); 3122 ArgOffset += 16; 3123 } 3124 } 3125 } 3126 } 3127 3128 if (!MemOpChains.empty()) 3129 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3130 &MemOpChains[0], MemOpChains.size()); 3131 3132 // Build a sequence of copy-to-reg nodes chained together with token chain 3133 // and flag operands which copy the outgoing args into the appropriate regs. 
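  // The result is a glued chain of the rough form (hypothetical two-register
  // call):
  //   t1: ch,glue = CopyToReg Chain, %R3, Arg0
  //   t2: ch,glue = CopyToReg t1,    %R4, Arg1, t1:1
  // Each copy consumes the previous copy's glue, so the scheduler cannot
  // separate the copies from each other or from the call that uses InFlag.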
3134  SDValue InFlag;
3135  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3136    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3137                             RegsToPass[i].second, InFlag);
3138    InFlag = Chain.getValue(1);
3139  }
3140
3141  if (isTailCall) {
3142    PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
3143                    FPOp, true, TailCallArguments);
3144  }
3145
3146  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
3147                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
3148                    Ins, InVals);
3149}
3150
3151SDValue
3152PPCTargetLowering::LowerReturn(SDValue Chain,
3153                               unsigned CallConv, bool isVarArg,
3154                               const SmallVectorImpl<ISD::OutputArg> &Outs,
3155                               DebugLoc dl, SelectionDAG &DAG) {
3156
3157  SmallVector<CCValAssign, 16> RVLocs;
3158  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
3159                 RVLocs, *DAG.getContext());
3160  CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
3161
3162  // If this is the first return lowered for this function, add the regs to the
3163  // liveout set for the function.
3164  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
3165    for (unsigned i = 0; i != RVLocs.size(); ++i)
3166      DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
3167  }
3168
3169  SDValue Flag;
3170
3171  // Copy the result values into the output registers.
3172  for (unsigned i = 0; i != RVLocs.size(); ++i) {
3173    CCValAssign &VA = RVLocs[i];
3174    assert(VA.isRegLoc() && "Can only return in registers!");
3175    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3176                             Outs[i].Val, Flag);
3177    Flag = Chain.getValue(1);
3178  }
3179
3180  if (Flag.getNode())
3181    return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
3182  else
3183    return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain);
3184}
3185
3186SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
3187                                             const PPCSubtarget &Subtarget) {
3188  // When we pop the dynamic allocation we need to restore the SP link.
3189  DebugLoc dl = Op.getDebugLoc();
3190
3191  // Get the correct type for pointers.
3192  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3193
3194  // Construct the stack pointer operand.
3195  bool IsPPC64 = Subtarget.isPPC64();
3196  unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1;
3197  SDValue StackPtr = DAG.getRegister(SP, PtrVT);
3198
3199  // Get the operands for the STACKRESTORE.
3200  SDValue Chain = Op.getOperand(0);
3201  SDValue SaveSP = Op.getOperand(1);
3202
3203  // Load the old link SP.
3204  SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, NULL, 0);
3205
3206  // Restore the stack pointer.
3207  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
3208
3209  // Store the old link SP.
3210  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, NULL, 0);
3211}
3212
3213
3214
3215SDValue
3216PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
3217  MachineFunction &MF = DAG.getMachineFunction();
3218  bool IsPPC64 = PPCSubTarget.isPPC64();
3219  bool isDarwinABI = PPCSubTarget.isDarwinABI();
3220  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3221
3222  // Get the current return address save index.  This is the fixed stack slot
3223  // in which the return address (LR) is saved.
3224  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
3225  int RASI = FI->getReturnAddrSaveIndex();
3226
3227  // If the return address save index hasn't been defined yet.
3228  if (!RASI) {
3229    // Find out what the fixed offset of the return address save area is.
3230    int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, isDarwinABI);
3231    // Allocate the frame index for the return address save area.
3232    RASI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, LROffset);
3233    // Save the result.
3234    FI->setReturnAddrSaveIndex(RASI);
3235  }
3236  return DAG.getFrameIndex(RASI, PtrVT);
3237}
3238
3239SDValue
3240PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
3241  MachineFunction &MF = DAG.getMachineFunction();
3242  bool IsPPC64 = PPCSubTarget.isPPC64();
3243  bool isDarwinABI = PPCSubTarget.isDarwinABI();
3244  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3245
3246  // Get the current frame pointer save index.  The users of this index will
3247  // be primarily DYNALLOC instructions.
3248  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
3249  int FPSI = FI->getFramePointerSaveIndex();
3250
3251  // If the frame pointer save index hasn't been defined yet.
3252  if (!FPSI) {
3253    // Find out what the fixed offset of the frame pointer save area is.
3254    int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64,
3255                                                           isDarwinABI);
3256
3257    // Allocate the frame index for the frame pointer save area.
3258    FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset);
3259    // Save the result.
3260    FI->setFramePointerSaveIndex(FPSI);
3261  }
3262  return DAG.getFrameIndex(FPSI, PtrVT);
3263}
3264
3265SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3266                                                   SelectionDAG &DAG,
3267                                                   const PPCSubtarget &Subtarget) {
3268  // Get the inputs.
3269  SDValue Chain = Op.getOperand(0);
3270  SDValue Size = Op.getOperand(1);
3271  DebugLoc dl = Op.getDebugLoc();
3272
3273  // Get the correct type for pointers.
3274  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3275  // Negate the size.
3276  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
3277                                DAG.getConstant(0, PtrVT), Size);
3278  // Construct a node for the frame pointer save index.
3279  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
3280  // Build a DYNALLOC node.
3281  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
3282  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
3283  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
3284}
3285
3286/// LowerSELECT_CC - Lower floating point select_cc's into an fsel instruction
3287/// when possible.
3288SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
3289  // Not FP? Not a fsel.
3290  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
3291      !Op.getOperand(2).getValueType().isFloatingPoint())
3292    return Op;
3293
3294  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
3295
3296  // Cannot handle SETEQ/SETNE.
3297  if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op;
3298
3299  EVT ResVT = Op.getValueType();
3300  EVT CmpVT = Op.getOperand(0).getValueType();
3301  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
3302  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
3303  DebugLoc dl = Op.getDebugLoc();
3304
3305  // If the RHS of the comparison is a 0.0, we don't need to do the
3306  // subtraction at all.
3307  if (isFloatingPointZero(RHS))
3308    switch (CC) {
3309    default: break;       // SETUO etc aren't handled by fsel.
3310 case ISD::SETULT: 3311 case ISD::SETLT: 3312 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 3313 case ISD::SETOGE: 3314 case ISD::SETGE: 3315 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 3316 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 3317 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 3318 case ISD::SETUGT: 3319 case ISD::SETGT: 3320 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 3321 case ISD::SETOLE: 3322 case ISD::SETLE: 3323 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 3324 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 3325 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 3326 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 3327 } 3328 3329 SDValue Cmp; 3330 switch (CC) { 3331 default: break; // SETUO etc aren't handled by fsel. 3332 case ISD::SETULT: 3333 case ISD::SETLT: 3334 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 3335 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3336 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3337 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 3338 case ISD::SETOGE: 3339 case ISD::SETGE: 3340 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 3341 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3342 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3343 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 3344 case ISD::SETUGT: 3345 case ISD::SETGT: 3346 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 3347 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3348 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3349 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 3350 case ISD::SETOLE: 3351 case ISD::SETLE: 3352 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 3353 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3354 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3355 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 3356 } 3357 return Op; 3358} 3359 3360// FIXME: Split this code up when LegalizeDAGTypes lands. 3361SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 3362 DebugLoc dl) { 3363 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 3364 SDValue Src = Op.getOperand(0); 3365 if (Src.getValueType() == MVT::f32) 3366 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 3367 3368 SDValue Tmp; 3369 switch (Op.getValueType().getSimpleVT().SimpleTy) { 3370 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 3371 case MVT::i32: 3372 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : 3373 PPCISD::FCTIDZ, 3374 dl, MVT::f64, Src); 3375 break; 3376 case MVT::i64: 3377 Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src); 3378 break; 3379 } 3380 3381 // Convert the FP value to an int value through memory. 3382 SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64); 3383 3384 // Emit a store to the stack slot. 3385 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, NULL, 0); 3386 3387 // Result is a load from the stack slot. If loading 4 bytes, make sure to 3388 // add in a bias. 
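  // The fctiwz/fctidz result was stored as a full 8-byte f64 slot above, and
  // the target is big-endian, so a 32-bit integer result lives in the
  // high-address half of that slot; biasing the pointer by 4 picks out
  // exactly those 4 bytes.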
3389 if (Op.getValueType() == MVT::i32) 3390 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 3391 DAG.getConstant(4, FIPtr.getValueType())); 3392 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, NULL, 0); 3393} 3394 3395SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3396 DebugLoc dl = Op.getDebugLoc(); 3397 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 3398 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 3399 return SDValue(); 3400 3401 if (Op.getOperand(0).getValueType() == MVT::i64) { 3402 SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl, 3403 MVT::f64, Op.getOperand(0)); 3404 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits); 3405 if (Op.getValueType() == MVT::f32) 3406 FP = DAG.getNode(ISD::FP_ROUND, dl, 3407 MVT::f32, FP, DAG.getIntPtrConstant(0)); 3408 return FP; 3409 } 3410 3411 assert(Op.getOperand(0).getValueType() == MVT::i32 && 3412 "Unhandled SINT_TO_FP type in custom expander!"); 3413 // Since we only generate this in 64-bit mode, we can take advantage of 3414 // 64-bit registers. In particular, sign extend the input value into the 3415 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 3416 // then lfd it and fcfid it. 3417 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 3418 int FrameIdx = FrameInfo->CreateStackObject(8, 8); 3419 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3420 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 3421 3422 SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32, 3423 Op.getOperand(0)); 3424 3425 // STD the extended value into the stack slot. 3426 MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx), 3427 MachineMemOperand::MOStore, 0, 8, 8); 3428 SDValue Store = DAG.getNode(PPCISD::STD_32, dl, MVT::Other, 3429 DAG.getEntryNode(), Ext64, FIdx, 3430 DAG.getMemOperand(MO)); 3431 // Load the value as a double. 3432 SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, NULL, 0); 3433 3434 // FCFID it and return it. 
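  // Taken together, the i32 path above amounts to roughly this machine code
  // (a sketch; the actual register assignment is up to the allocator):
  //   extsw rT, rSrc       ; sign-extend the i32 to 64 bits
  //   std   rT, slot(r1)   ; store the whole doubleword to the stack slot
  //   lfd   fT, slot(r1)   ; reload it into an FP register
  //   fcfid fD, fT         ; convert to double
  //   frsp  fD, fD         ; round to single only if the result is f32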
3435 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld); 3436 if (Op.getValueType() == MVT::f32) 3437 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); 3438 return FP; 3439} 3440 3441SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) { 3442 DebugLoc dl = Op.getDebugLoc(); 3443 /* 3444 The rounding mode is in bits 30:31 of FPSR, and has the following 3445 settings: 3446 00 Round to nearest 3447 01 Round to 0 3448 10 Round to +inf 3449 11 Round to -inf 3450 3451 FLT_ROUNDS, on the other hand, expects the following: 3452 -1 Undefined 3453 0 Round to 0 3454 1 Round to nearest 3455 2 Round to +inf 3456 3 Round to -inf 3457 3458 To perform the conversion, we do: 3459 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 3460 */ 3461 3462 MachineFunction &MF = DAG.getMachineFunction(); 3463 EVT VT = Op.getValueType(); 3464 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3465 std::vector<EVT> NodeTys; 3466 SDValue MFFSreg, InFlag; 3467 3468 // Save FP Control Word to register 3469 NodeTys.push_back(MVT::f64); // return register 3470 NodeTys.push_back(MVT::Flag); // unused in this context 3471 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 3472 3473 // Save FP register to stack slot 3474 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); 3475 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 3476 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, 3477 StackSlot, NULL, 0); 3478 3479 // Load FP Control Word from low 32 bits of stack slot. 3480 SDValue Four = DAG.getConstant(4, PtrVT); 3481 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 3482 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, NULL, 0); 3483 3484 // Transform as necessary 3485 SDValue CWD1 = 3486 DAG.getNode(ISD::AND, dl, MVT::i32, 3487 CWD, DAG.getConstant(3, MVT::i32)); 3488 SDValue CWD2 = 3489 DAG.getNode(ISD::SRL, dl, MVT::i32, 3490 DAG.getNode(ISD::AND, dl, MVT::i32, 3491 DAG.getNode(ISD::XOR, dl, MVT::i32, 3492 CWD, DAG.getConstant(3, MVT::i32)), 3493 DAG.getConstant(3, MVT::i32)), 3494 DAG.getConstant(1, MVT::i32)); 3495 3496 SDValue RetVal = 3497 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 3498 3499 return DAG.getNode((VT.getSizeInBits() < 16 ? 3500 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 3501} 3502 3503SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) { 3504 EVT VT = Op.getValueType(); 3505 unsigned BitWidth = VT.getSizeInBits(); 3506 DebugLoc dl = Op.getDebugLoc(); 3507 assert(Op.getNumOperands() == 3 && 3508 VT == Op.getOperand(1).getValueType() && 3509 "Unexpected SHL!"); 3510 3511 // Expand into a bunch of logical ops. Note that these ops 3512 // depend on the PPC behavior for oversized shift amounts. 
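  // Concretely, slw/srw (and sld/srd) look at one extra bit of the shift
  // amount and yield 0 when it is set.  So for Amt < BitWidth the
  // "Lo << (Amt - BitWidth)" term below uses a negative amount, reads as an
  // oversized shift, and contributes 0; for Amt >= BitWidth the two ordinary
  // terms are 0 and only that term survives.  Either way the OR of the
  // pieces is the correct double-width result, with no branches needed.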
3513 SDValue Lo = Op.getOperand(0); 3514 SDValue Hi = Op.getOperand(1); 3515 SDValue Amt = Op.getOperand(2); 3516 EVT AmtVT = Amt.getValueType(); 3517 3518 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3519 DAG.getConstant(BitWidth, AmtVT), Amt); 3520 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 3521 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 3522 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 3523 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3524 DAG.getConstant(-BitWidth, AmtVT)); 3525 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 3526 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 3527 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 3528 SDValue OutOps[] = { OutLo, OutHi }; 3529 return DAG.getMergeValues(OutOps, 2, dl); 3530} 3531 3532SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) { 3533 EVT VT = Op.getValueType(); 3534 DebugLoc dl = Op.getDebugLoc(); 3535 unsigned BitWidth = VT.getSizeInBits(); 3536 assert(Op.getNumOperands() == 3 && 3537 VT == Op.getOperand(1).getValueType() && 3538 "Unexpected SRL!"); 3539 3540 // Expand into a bunch of logical ops. Note that these ops 3541 // depend on the PPC behavior for oversized shift amounts. 3542 SDValue Lo = Op.getOperand(0); 3543 SDValue Hi = Op.getOperand(1); 3544 SDValue Amt = Op.getOperand(2); 3545 EVT AmtVT = Amt.getValueType(); 3546 3547 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3548 DAG.getConstant(BitWidth, AmtVT), Amt); 3549 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 3550 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 3551 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 3552 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3553 DAG.getConstant(-BitWidth, AmtVT)); 3554 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 3555 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 3556 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 3557 SDValue OutOps[] = { OutLo, OutHi }; 3558 return DAG.getMergeValues(OutOps, 2, dl); 3559} 3560 3561SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) { 3562 DebugLoc dl = Op.getDebugLoc(); 3563 EVT VT = Op.getValueType(); 3564 unsigned BitWidth = VT.getSizeInBits(); 3565 assert(Op.getNumOperands() == 3 && 3566 VT == Op.getOperand(1).getValueType() && 3567 "Unexpected SRA!"); 3568 3569 // Expand into a bunch of logical ops, followed by a select_cc. 3570 SDValue Lo = Op.getOperand(0); 3571 SDValue Hi = Op.getOperand(1); 3572 SDValue Amt = Op.getOperand(2); 3573 EVT AmtVT = Amt.getValueType(); 3574 3575 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3576 DAG.getConstant(BitWidth, AmtVT), Amt); 3577 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 3578 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 3579 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 3580 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3581 DAG.getConstant(-BitWidth, AmtVT)); 3582 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 3583 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 3584 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 3585 Tmp4, Tmp6, ISD::SETLE); 3586 SDValue OutOps[] = { OutLo, OutHi }; 3587 return DAG.getMergeValues(OutOps, 2, dl); 3588} 3589 3590//===----------------------------------------------------------------------===// 3591// Vector related lowering. 
3592// 3593 3594/// BuildSplatI - Build a canonical splati of Val with an element size of 3595/// SplatSize. Cast the result to VT. 3596static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 3597 SelectionDAG &DAG, DebugLoc dl) { 3598 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 3599 3600 static const EVT VTys[] = { // canonical VT to use for each size. 3601 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 3602 }; 3603 3604 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 3605 3606 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 3607 if (Val == -1) 3608 SplatSize = 1; 3609 3610 EVT CanonicalVT = VTys[SplatSize-1]; 3611 3612 // Build a canonical splat for this value. 3613 SDValue Elt = DAG.getConstant(Val, MVT::i32); 3614 SmallVector<SDValue, 8> Ops; 3615 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 3616 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, 3617 &Ops[0], Ops.size()); 3618 return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res); 3619} 3620 3621/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 3622/// specified intrinsic ID. 3623static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 3624 SelectionDAG &DAG, DebugLoc dl, 3625 EVT DestVT = MVT::Other) { 3626 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 3627 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 3628 DAG.getConstant(IID, MVT::i32), LHS, RHS); 3629} 3630 3631/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 3632/// specified intrinsic ID. 3633static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 3634 SDValue Op2, SelectionDAG &DAG, 3635 DebugLoc dl, EVT DestVT = MVT::Other) { 3636 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 3637 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 3638 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 3639} 3640 3641 3642/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 3643/// amount. The result has the specified value type. 3644static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 3645 EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3646 // Force LHS/RHS to be the right type. 3647 LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS); 3648 RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS); 3649 3650 int Ops[16]; 3651 for (unsigned i = 0; i != 16; ++i) 3652 Ops[i] = i + Amt; 3653 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 3654 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T); 3655} 3656 3657// If this is a case we can't handle, return null and let the default 3658// expansion code take care of it. If we CAN select this case, and if it 3659// selects to a single instruction, return Op. Otherwise, if we can codegen 3660// this case more efficiently than a constant pool load, lower it to the 3661// sequence of ops that should be used. 3662SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { 3663 DebugLoc dl = Op.getDebugLoc(); 3664 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 3665 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 3666 3667 // Check if this is a splat of a constant value. 3668 APInt APSplatBits, APSplatUndef; 3669 unsigned SplatBitSize; 3670 bool HasAnyUndefs; 3671 if (! 
BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 3672 HasAnyUndefs) || SplatBitSize > 32) 3673 return SDValue(); 3674 3675 unsigned SplatBits = APSplatBits.getZExtValue(); 3676 unsigned SplatUndef = APSplatUndef.getZExtValue(); 3677 unsigned SplatSize = SplatBitSize / 8; 3678 3679 // First, handle single instruction cases. 3680 3681 // All zeros? 3682 if (SplatBits == 0) { 3683 // Canonicalize all zero vectors to be v4i32. 3684 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 3685 SDValue Z = DAG.getConstant(0, MVT::i32); 3686 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 3687 Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z); 3688 } 3689 return Op; 3690 } 3691 3692 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 3693 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 3694 (32-SplatBitSize)); 3695 if (SextVal >= -16 && SextVal <= 15) 3696 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 3697 3698 3699 // Two instruction sequences. 3700 3701 // If this value is in the range [-32,30] and is even, use: 3702 // tmp = VSPLTI[bhw], result = add tmp, tmp 3703 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { 3704 SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl); 3705 Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res); 3706 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3707 } 3708 3709 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 3710 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 3711 // for fneg/fabs. 3712 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 3713 // Make -1 and vspltisw -1: 3714 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 3715 3716 // Make the VSLW intrinsic, computing 0x8000_0000. 3717 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 3718 OnesV, DAG, dl); 3719 3720 // xor by OnesV to invert it. 3721 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 3722 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3723 } 3724 3725 // Check to see if this is a wide variety of vsplti*, binop self cases. 3726 static const signed char SplatCsts[] = { 3727 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 3728 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 3729 }; 3730 3731 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 3732 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 3733 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 3734 int i = SplatCsts[idx]; 3735 3736 // Figure out what shift amount will be used by altivec if shifted by i in 3737 // this splat size. 3738 unsigned TypeShiftAmt = i & (SplatBitSize-1); 3739 3740 // vsplti + shl self. 3741 if (SextVal == (i << (int)TypeShiftAmt)) { 3742 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 3743 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3744 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 3745 Intrinsic::ppc_altivec_vslw 3746 }; 3747 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 3748 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3749 } 3750 3751 // vsplti + srl self. 3752 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 3753 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 3754 static const unsigned IIDs[] = { // Intrinsic to use for each size. 
3755 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 3756 Intrinsic::ppc_altivec_vsrw 3757 }; 3758 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 3759 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3760 } 3761 3762 // vsplti + sra self. 3763 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 3764 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 3765 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3766 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 3767 Intrinsic::ppc_altivec_vsraw 3768 }; 3769 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 3770 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3771 } 3772 3773 // vsplti + rol self. 3774 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 3775 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 3776 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 3777 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3778 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 3779 Intrinsic::ppc_altivec_vrlw 3780 }; 3781 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 3782 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3783 } 3784 3785 // t = vsplti c, result = vsldoi t, t, 1 3786 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) { 3787 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 3788 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 3789 } 3790 // t = vsplti c, result = vsldoi t, t, 2 3791 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) { 3792 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 3793 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 3794 } 3795 // t = vsplti c, result = vsldoi t, t, 3 3796 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) { 3797 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 3798 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 3799 } 3800 } 3801 3802 // Three instruction sequences. 3803 3804 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 3805 if (SextVal >= 0 && SextVal <= 31) { 3806 SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl); 3807 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); 3808 LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS); 3809 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS); 3810 } 3811 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 3812 if (SextVal >= -31 && SextVal <= 0) { 3813 SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl); 3814 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); 3815 LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS); 3816 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS); 3817 } 3818 3819 return SDValue(); 3820} 3821 3822/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3823/// the specified operations to build the shuffle. 
3824static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3825 SDValue RHS, SelectionDAG &DAG, 3826 DebugLoc dl) { 3827 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3828 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3829 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3830 3831 enum { 3832 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3833 OP_VMRGHW, 3834 OP_VMRGLW, 3835 OP_VSPLTISW0, 3836 OP_VSPLTISW1, 3837 OP_VSPLTISW2, 3838 OP_VSPLTISW3, 3839 OP_VSLDOI4, 3840 OP_VSLDOI8, 3841 OP_VSLDOI12 3842 }; 3843 3844 if (OpNum == OP_COPY) { 3845 if (LHSID == (1*9+2)*9+3) return LHS; 3846 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 3847 return RHS; 3848 } 3849 3850 SDValue OpLHS, OpRHS; 3851 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 3852 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 3853 3854 int ShufIdxs[16]; 3855 switch (OpNum) { 3856 default: llvm_unreachable("Unknown i32 permute!"); 3857 case OP_VMRGHW: 3858 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 3859 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 3860 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 3861 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 3862 break; 3863 case OP_VMRGLW: 3864 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 3865 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 3866 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 3867 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 3868 break; 3869 case OP_VSPLTISW0: 3870 for (unsigned i = 0; i != 16; ++i) 3871 ShufIdxs[i] = (i&3)+0; 3872 break; 3873 case OP_VSPLTISW1: 3874 for (unsigned i = 0; i != 16; ++i) 3875 ShufIdxs[i] = (i&3)+4; 3876 break; 3877 case OP_VSPLTISW2: 3878 for (unsigned i = 0; i != 16; ++i) 3879 ShufIdxs[i] = (i&3)+8; 3880 break; 3881 case OP_VSPLTISW3: 3882 for (unsigned i = 0; i != 16; ++i) 3883 ShufIdxs[i] = (i&3)+12; 3884 break; 3885 case OP_VSLDOI4: 3886 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 3887 case OP_VSLDOI8: 3888 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 3889 case OP_VSLDOI12: 3890 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 3891 } 3892 EVT VT = OpLHS.getValueType(); 3893 OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpLHS); 3894 OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpRHS); 3895 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 3896 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T); 3897} 3898 3899/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 3900/// is a shuffle we can handle in a single instruction, return it. Otherwise, 3901/// return the code it can be lowered into. Worst case, it can always be 3902/// lowered into a vperm. 3903SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 3904 SelectionDAG &DAG) { 3905 DebugLoc dl = Op.getDebugLoc(); 3906 SDValue V1 = Op.getOperand(0); 3907 SDValue V2 = Op.getOperand(1); 3908 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 3909 EVT VT = Op.getValueType(); 3910 3911 // Cases that are handled by instructions that take permute immediates 3912 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 3913 // selected by the instruction selector. 
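  // For example, a v4i32 shuffle with mask <2,2,2,2> and an undef second
  // operand is a splat of element 2; left as a VECTOR_SHUFFLE it matches the
  // vspltw pattern directly instead of falling into the general vperm path
  // below.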
3914 if (V2.getOpcode() == ISD::UNDEF) { 3915 if (PPC::isSplatShuffleMask(SVOp, 1) || 3916 PPC::isSplatShuffleMask(SVOp, 2) || 3917 PPC::isSplatShuffleMask(SVOp, 4) || 3918 PPC::isVPKUWUMShuffleMask(SVOp, true) || 3919 PPC::isVPKUHUMShuffleMask(SVOp, true) || 3920 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 || 3921 PPC::isVMRGLShuffleMask(SVOp, 1, true) || 3922 PPC::isVMRGLShuffleMask(SVOp, 2, true) || 3923 PPC::isVMRGLShuffleMask(SVOp, 4, true) || 3924 PPC::isVMRGHShuffleMask(SVOp, 1, true) || 3925 PPC::isVMRGHShuffleMask(SVOp, 2, true) || 3926 PPC::isVMRGHShuffleMask(SVOp, 4, true)) { 3927 return Op; 3928 } 3929 } 3930 3931 // Altivec has a variety of "shuffle immediates" that take two vector inputs 3932 // and produce a fixed permutation. If any of these match, do not lower to 3933 // VPERM. 3934 if (PPC::isVPKUWUMShuffleMask(SVOp, false) || 3935 PPC::isVPKUHUMShuffleMask(SVOp, false) || 3936 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 || 3937 PPC::isVMRGLShuffleMask(SVOp, 1, false) || 3938 PPC::isVMRGLShuffleMask(SVOp, 2, false) || 3939 PPC::isVMRGLShuffleMask(SVOp, 4, false) || 3940 PPC::isVMRGHShuffleMask(SVOp, 1, false) || 3941 PPC::isVMRGHShuffleMask(SVOp, 2, false) || 3942 PPC::isVMRGHShuffleMask(SVOp, 4, false)) 3943 return Op; 3944 3945 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 3946 // perfect shuffle table to emit an optimal matching sequence. 3947 SmallVector<int, 16> PermMask; 3948 SVOp->getMask(PermMask); 3949 3950 unsigned PFIndexes[4]; 3951 bool isFourElementShuffle = true; 3952 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 3953 unsigned EltNo = 8; // Start out undef. 3954 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 3955 if (PermMask[i*4+j] < 0) 3956 continue; // Undef, ignore it. 3957 3958 unsigned ByteSource = PermMask[i*4+j]; 3959 if ((ByteSource & 3) != j) { 3960 isFourElementShuffle = false; 3961 break; 3962 } 3963 3964 if (EltNo == 8) { 3965 EltNo = ByteSource/4; 3966 } else if (EltNo != ByteSource/4) { 3967 isFourElementShuffle = false; 3968 break; 3969 } 3970 } 3971 PFIndexes[i] = EltNo; 3972 } 3973 3974 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 3975 // perfect shuffle vector to determine if it is cost effective to do this as 3976 // discrete instructions, or whether we should use a vperm. 3977 if (isFourElementShuffle) { 3978 // Compute the index in the perfect shuffle table. 3979 unsigned PFTableIndex = 3980 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 3981 3982 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 3983 unsigned Cost = (PFEntry >> 30); 3984 3985 // Determining when to avoid vperm is tricky. Many things affect the cost 3986 // of vperm, particularly how many times the perm mask needs to be computed. 3987 // For example, if the perm mask can be hoisted out of a loop or is already 3988 // used (perhaps because there are multiple permutes with the same shuffle 3989 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 3990 // the loop requires an extra register. 3991 // 3992 // As a compromise, we only emit discrete instructions if the shuffle can be 3993 // generated in 3 or fewer operations. When we have loop information 3994 // available, if this block is within a loop, we should avoid using vperm 3995 // for 3-operation perms and use a constant pool load instead. 
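  // Each PerfectShuffleTable entry packs its recipe into 32 bits (decoded in
  // GeneratePerfectShuffle above): bits 31-30 hold the cost, bits 29-26 the
  // operation kind (vmrgh/vmrgl/vspltisw/vsldoi), and bits 25-13 and 12-0
  // the table indices of the left and right sub-shuffles it is built from.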
3996 if (Cost < 3) 3997 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 3998 } 3999 4000 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 4001 // vector that will get spilled to the constant pool. 4002 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 4003 4004 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 4005 // that it is in input element units, not in bytes. Convert now. 4006 EVT EltVT = V1.getValueType().getVectorElementType(); 4007 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 4008 4009 SmallVector<SDValue, 16> ResultMask; 4010 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4011 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 4012 4013 for (unsigned j = 0; j != BytesPerElement; ++j) 4014 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 4015 MVT::i32)); 4016 } 4017 4018 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 4019 &ResultMask[0], ResultMask.size()); 4020 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); 4021} 4022 4023/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 4024/// altivec comparison. If it is, return true and fill in Opc/isDot with 4025/// information about the intrinsic. 4026static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 4027 bool &isDot) { 4028 unsigned IntrinsicID = 4029 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 4030 CompareOpc = -1; 4031 isDot = false; 4032 switch (IntrinsicID) { 4033 default: return false; 4034 // Comparison predicates. 4035 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 4036 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 4037 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 4038 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 4039 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 4040 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 4041 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 4042 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 4043 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 4044 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 4045 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 4046 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 4047 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 4048 4049 // Normal Comparisons. 
4050 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 4051 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 4052 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 4053 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 4054 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 4055 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 4056 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 4057 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 4058 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 4059 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 4060 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 4061 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 4062 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 4063 } 4064 return true; 4065} 4066 4067/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 4068/// lower, do it, otherwise return null. 4069SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 4070 SelectionDAG &DAG) { 4071 // If this is a lowered altivec predicate compare, CompareOpc is set to the 4072 // opcode number of the comparison. 4073 DebugLoc dl = Op.getDebugLoc(); 4074 int CompareOpc; 4075 bool isDot; 4076 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 4077 return SDValue(); // Don't custom lower most intrinsics. 4078 4079 // If this is a non-dot comparison, make the VCMP node and we are done. 4080 if (!isDot) { 4081 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 4082 Op.getOperand(1), Op.getOperand(2), 4083 DAG.getConstant(CompareOpc, MVT::i32)); 4084 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp); 4085 } 4086 4087 // Create the PPCISD altivec 'dot' comparison node. 4088 SDValue Ops[] = { 4089 Op.getOperand(2), // LHS 4090 Op.getOperand(3), // RHS 4091 DAG.getConstant(CompareOpc, MVT::i32) 4092 }; 4093 std::vector<EVT> VTs; 4094 VTs.push_back(Op.getOperand(2).getValueType()); 4095 VTs.push_back(MVT::Flag); 4096 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 4097 4098 // Now that we have the comparison, emit a copy from the CR to a GPR. 4099 // This is flagged to the above dot comparison. 4100 SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32, 4101 DAG.getRegister(PPC::CR6, MVT::i32), 4102 CompNode.getValue(1)); 4103 4104 // Unpack the result based on how the target uses it. 4105 unsigned BitNo; // Bit # of CR6. 4106 bool InvertBit; // Invert result? 4107 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 4108 default: // Can't happen, don't crash on invalid number though. 4109 case 0: // Return the value of the EQ bit of CR6. 4110 BitNo = 0; InvertBit = false; 4111 break; 4112 case 1: // Return the inverted value of the EQ bit of CR6. 4113 BitNo = 0; InvertBit = true; 4114 break; 4115 case 2: // Return the value of the LT bit of CR6. 4116 BitNo = 2; InvertBit = false; 4117 break; 4118 case 3: // Return the inverted value of the LT bit of CR6. 4119 BitNo = 2; InvertBit = true; 4120 break; 4121 } 4122 4123 // Shift the bit into the low position. 4124 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 4125 DAG.getConstant(8-(3-BitNo), MVT::i32)); 4126 // Isolate the bit. 
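  // (After the MFCR above, CR6's four bits LT, GT, EQ and SO sit at GPR bit
  // positions 7..4, counting from the least significant bit, so the shift
  // amount 8-(3-BitNo) brings the selected bit - EQ for BitNo==0, LT for
  // BitNo==2 - down to bit 0, and the AND below masks off everything else.)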
4127 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 4128 DAG.getConstant(1, MVT::i32)); 4129 4130 // If we are supposed to, toggle the bit. 4131 if (InvertBit) 4132 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 4133 DAG.getConstant(1, MVT::i32)); 4134 return Flags; 4135} 4136 4137SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 4138 SelectionDAG &DAG) { 4139 DebugLoc dl = Op.getDebugLoc(); 4140 // Create a stack slot that is 16-byte aligned. 4141 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 4142 int FrameIdx = FrameInfo->CreateStackObject(16, 16); 4143 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4144 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4145 4146 // Store the input value into Value#0 of the stack slot. 4147 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 4148 Op.getOperand(0), FIdx, NULL, 0); 4149 // Load it out. 4150 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, NULL, 0); 4151} 4152 4153SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) { 4154 DebugLoc dl = Op.getDebugLoc(); 4155 if (Op.getValueType() == MVT::v4i32) { 4156 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4157 4158 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 4159 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 4160 4161 SDValue RHSSwap = // = vrlw RHS, 16 4162 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 4163 4164 // Shrinkify inputs to v8i16. 4165 LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS); 4166 RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS); 4167 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap); 4168 4169 // Low parts multiplied together, generating 32-bit results (we ignore the 4170 // top parts). 4171 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 4172 LHS, RHS, DAG, dl, MVT::v4i32); 4173 4174 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 4175 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 4176 // Shift the high parts up 16 bits. 4177 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 4178 Neg16, DAG, dl); 4179 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 4180 } else if (Op.getValueType() == MVT::v8i16) { 4181 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4182 4183 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 4184 4185 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 4186 LHS, RHS, Zero, DAG, dl); 4187 } else if (Op.getValueType() == MVT::v16i8) { 4188 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4189 4190 // Multiply the even 8-bit parts, producing 16-bit sums. 4191 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 4192 LHS, RHS, DAG, dl, MVT::v8i16); 4193 EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts); 4194 4195 // Multiply the odd 8-bit parts, producing 16-bit sums. 4196 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 4197 LHS, RHS, DAG, dl, MVT::v8i16); 4198 OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts); 4199 4200 // Merge the results together. 4201 int Ops[16]; 4202 for (unsigned i = 0; i != 8; ++i) { 4203 Ops[i*2 ] = 2*i+1; 4204 Ops[i*2+1] = 2*i+1+16; 4205 } 4206 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 4207 } else { 4208 llvm_unreachable("Unknown mul to lower!"); 4209 } 4210} 4211 4212/// LowerOperation - Provide custom lowering hooks for some operations. 
4213/// 4214SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { 4215 switch (Op.getOpcode()) { 4216 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 4217 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4218 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4219 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4220 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4221 case ISD::SETCC: return LowerSETCC(Op, DAG); 4222 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 4223 case ISD::VASTART: 4224 return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, 4225 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); 4226 4227 case ISD::VAARG: 4228 return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset, 4229 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); 4230 4231 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 4232 case ISD::DYNAMIC_STACKALLOC: 4233 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 4234 4235 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4236 case ISD::FP_TO_UINT: 4237 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 4238 Op.getDebugLoc()); 4239 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4240 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4241 4242 // Lower 64-bit shifts. 4243 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 4244 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 4245 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 4246 4247 // Vector-related lowering. 4248 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4249 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4250 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4251 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4252 case ISD::MUL: return LowerMUL(Op, DAG); 4253 4254 // Frame & Return address. 4255 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4256 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4257 } 4258 return SDValue(); 4259} 4260 4261void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 4262 SmallVectorImpl<SDValue>&Results, 4263 SelectionDAG &DAG) { 4264 DebugLoc dl = N->getDebugLoc(); 4265 switch (N->getOpcode()) { 4266 default: 4267 assert(false && "Do not know how to custom type legalize this operation!"); 4268 return; 4269 case ISD::FP_ROUND_INREG: { 4270 assert(N->getValueType(0) == MVT::ppcf128); 4271 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 4272 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 4273 MVT::f64, N->getOperand(0), 4274 DAG.getIntPtrConstant(0)); 4275 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 4276 MVT::f64, N->getOperand(0), 4277 DAG.getIntPtrConstant(1)); 4278 4279 // This sequence changes FPSCR to do round-to-zero, adds the two halves 4280 // of the long double, and puts FPSCR back the way it was. We do not 4281 // actually model FPSCR. 
4282 std::vector<EVT> NodeTys; 4283 SDValue Ops[4], Result, MFFSreg, InFlag, FPreg; 4284 4285 NodeTys.push_back(MVT::f64); // Return register 4286 NodeTys.push_back(MVT::Flag); // Returns a flag for later insns 4287 Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 4288 MFFSreg = Result.getValue(0); 4289 InFlag = Result.getValue(1); 4290 4291 NodeTys.clear(); 4292 NodeTys.push_back(MVT::Flag); // Returns a flag 4293 Ops[0] = DAG.getConstant(31, MVT::i32); 4294 Ops[1] = InFlag; 4295 Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2); 4296 InFlag = Result.getValue(0); 4297 4298 NodeTys.clear(); 4299 NodeTys.push_back(MVT::Flag); // Returns a flag 4300 Ops[0] = DAG.getConstant(30, MVT::i32); 4301 Ops[1] = InFlag; 4302 Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2); 4303 InFlag = Result.getValue(0); 4304 4305 NodeTys.clear(); 4306 NodeTys.push_back(MVT::f64); // result of add 4307 NodeTys.push_back(MVT::Flag); // Returns a flag 4308 Ops[0] = Lo; 4309 Ops[1] = Hi; 4310 Ops[2] = InFlag; 4311 Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3); 4312 FPreg = Result.getValue(0); 4313 InFlag = Result.getValue(1); 4314 4315 NodeTys.clear(); 4316 NodeTys.push_back(MVT::f64); 4317 Ops[0] = DAG.getConstant(1, MVT::i32); 4318 Ops[1] = MFFSreg; 4319 Ops[2] = FPreg; 4320 Ops[3] = InFlag; 4321 Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4); 4322 FPreg = Result.getValue(0); 4323 4324 // We know the low half is about to be thrown away, so just use something 4325 // convenient. 4326 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 4327 FPreg, FPreg)); 4328 return; 4329 } 4330 case ISD::FP_TO_SINT: 4331 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 4332 return; 4333 } 4334} 4335 4336 4337//===----------------------------------------------------------------------===// 4338// Other Lowering Code 4339//===----------------------------------------------------------------------===// 4340 4341MachineBasicBlock * 4342PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4343 bool is64bit, unsigned BinOpcode) const { 4344 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4345 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4346 4347 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4348 MachineFunction *F = BB->getParent(); 4349 MachineFunction::iterator It = BB; 4350 ++It; 4351 4352 unsigned dest = MI->getOperand(0).getReg(); 4353 unsigned ptrA = MI->getOperand(1).getReg(); 4354 unsigned ptrB = MI->getOperand(2).getReg(); 4355 unsigned incr = MI->getOperand(3).getReg(); 4356 DebugLoc dl = MI->getDebugLoc(); 4357 4358 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 4359 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4360 F->insert(It, loopMBB); 4361 F->insert(It, exitMBB); 4362 exitMBB->transferSuccessors(BB); 4363 4364 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4365 unsigned TmpReg = (!BinOpcode) ? incr : 4366 RegInfo.createVirtualRegister( 4367 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 4368 (const TargetRegisterClass *) &PPC::GPRCRegClass); 4369 4370 // thisMBB: 4371 // ... 4372 // fallthrough --> loopMBB 4373 BB->addSuccessor(loopMBB); 4374 4375 // loopMBB: 4376 // l[wd]arx dest, ptr 4377 // add r0, dest, incr 4378 // st[wd]cx. r0, ptr 4379 // bne- loopMBB 4380 // fallthrough --> exitMBB 4381 BB = loopMBB; 4382 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::LDARX : PPC::LWARX), dest) 4383 .addReg(ptrA).addReg(ptrB); 4384 if (BinOpcode) 4385 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 4386 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4387 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 4388 BuildMI(BB, dl, TII->get(PPC::BCC)) 4389 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 4390 BB->addSuccessor(loopMBB); 4391 BB->addSuccessor(exitMBB); 4392 4393 // exitMBB: 4394 // ... 4395 BB = exitMBB; 4396 return BB; 4397} 4398 4399MachineBasicBlock * 4400PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 4401 MachineBasicBlock *BB, 4402 bool is8bit, // operation 4403 unsigned BinOpcode) const { 4404 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4405 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4406 // In 64 bit mode we have to use 64 bits for addresses, even though the 4407 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 4408 // registers without caring whether they're 32 or 64, but here we're 4409 // doing actual arithmetic on the addresses. 4410 bool is64bit = PPCSubTarget.isPPC64(); 4411 4412 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4413 MachineFunction *F = BB->getParent(); 4414 MachineFunction::iterator It = BB; 4415 ++It; 4416 4417 unsigned dest = MI->getOperand(0).getReg(); 4418 unsigned ptrA = MI->getOperand(1).getReg(); 4419 unsigned ptrB = MI->getOperand(2).getReg(); 4420 unsigned incr = MI->getOperand(3).getReg(); 4421 DebugLoc dl = MI->getDebugLoc(); 4422 4423 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 4424 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4425 F->insert(It, loopMBB); 4426 F->insert(It, exitMBB); 4427 exitMBB->transferSuccessors(BB); 4428 4429 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4430 const TargetRegisterClass *RC = 4431 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 4432 (const TargetRegisterClass *) &PPC::GPRCRegClass; 4433 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 4434 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 4435 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 4436 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 4437 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 4438 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 4439 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 4440 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 4441 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 4442 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 4443 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 4444 unsigned Ptr1Reg; 4445 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 4446 4447 // thisMBB: 4448 // ... 4449 // fallthrough --> loopMBB 4450 BB->addSuccessor(loopMBB); 4451 4452 // The 4-byte load must be aligned, while a char or short may be 4453 // anywhere in the word. Hence all this nasty bookkeeping code. 4454 // add ptr1, ptrA, ptrB [copy if ptrA==0] 4455 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 4456 // xori shift, shift1, 24 [16] 4457 // rlwinm ptr, ptr1, 0, 0, 29 4458 // slw incr2, incr, shift 4459 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 4460 // slw mask, mask2, shift 4461 // loopMBB: 4462 // lwarx tmpDest, ptr 4463 // add tmp, tmpDest, incr2 4464 // andc tmp2, tmpDest, mask 4465 // and tmp3, tmp, mask 4466 // or tmp4, tmp3, tmp2 4467 // stwcx. 
tmp4, ptr 4468 // bne- loopMBB 4469 // fallthrough --> exitMBB 4470 // srw dest, tmpDest, shift 4471 4472 if (ptrA!=PPC::R0) { 4473 Ptr1Reg = RegInfo.createVirtualRegister(RC); 4474 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 4475 .addReg(ptrA).addReg(ptrB); 4476 } else { 4477 Ptr1Reg = ptrB; 4478 } 4479 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 4480 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 4481 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 4482 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 4483 if (is64bit) 4484 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 4485 .addReg(Ptr1Reg).addImm(0).addImm(61); 4486 else 4487 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 4488 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 4489 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 4490 .addReg(incr).addReg(ShiftReg); 4491 if (is8bit) 4492 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 4493 else { 4494 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 4495 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 4496 } 4497 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 4498 .addReg(Mask2Reg).addReg(ShiftReg); 4499 4500 BB = loopMBB; 4501 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 4502 .addReg(PPC::R0).addReg(PtrReg); 4503 if (BinOpcode) 4504 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 4505 .addReg(Incr2Reg).addReg(TmpDestReg); 4506 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 4507 .addReg(TmpDestReg).addReg(MaskReg); 4508 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 4509 .addReg(TmpReg).addReg(MaskReg); 4510 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 4511 .addReg(Tmp3Reg).addReg(Tmp2Reg); 4512 BuildMI(BB, dl, TII->get(PPC::STWCX)) 4513 .addReg(Tmp4Reg).addReg(PPC::R0).addReg(PtrReg); 4514 BuildMI(BB, dl, TII->get(PPC::BCC)) 4515 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 4516 BB->addSuccessor(loopMBB); 4517 BB->addSuccessor(exitMBB); 4518 4519 // exitMBB: 4520 // ... 4521 BB = exitMBB; 4522 BuildMI(BB, dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg).addReg(ShiftReg); 4523 return BB; 4524} 4525 4526MachineBasicBlock * 4527PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4528 MachineBasicBlock *BB) const { 4529 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4530 4531 // To "insert" these instructions we actually have to insert their 4532 // control-flow patterns. 4533 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4534 MachineFunction::iterator It = BB; 4535 ++It; 4536 4537 MachineFunction *F = BB->getParent(); 4538 4539 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 4540 MI->getOpcode() == PPC::SELECT_CC_I8 || 4541 MI->getOpcode() == PPC::SELECT_CC_F4 || 4542 MI->getOpcode() == PPC::SELECT_CC_F8 || 4543 MI->getOpcode() == PPC::SELECT_CC_VRRC) { 4544 4545 // The incoming instruction knows the destination vreg to set, the 4546 // condition code register to branch on, the true/false values to 4547 // select between, and a branch opcode to use. 4548 4549 // thisMBB: 4550 // ... 4551 // TrueVal = ... 
4552 // cmpTY ccX, r1, r2 4553 // bCC copy1MBB 4554 // fallthrough --> copy0MBB 4555 MachineBasicBlock *thisMBB = BB; 4556 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4557 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4558 unsigned SelectPred = MI->getOperand(4).getImm(); 4559 DebugLoc dl = MI->getDebugLoc(); 4560 BuildMI(BB, dl, TII->get(PPC::BCC)) 4561 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 4562 F->insert(It, copy0MBB); 4563 F->insert(It, sinkMBB); 4564 // Update machine-CFG edges by transferring all successors of the current 4565 // block to the new block which will contain the Phi node for the select. 4566 sinkMBB->transferSuccessors(BB); 4567 // Next, add the true and fallthrough blocks as its successors. 4568 BB->addSuccessor(copy0MBB); 4569 BB->addSuccessor(sinkMBB); 4570 4571 // copy0MBB: 4572 // %FalseValue = ... 4573 // # fallthrough to sinkMBB 4574 BB = copy0MBB; 4575 4576 // Update machine-CFG edges 4577 BB->addSuccessor(sinkMBB); 4578 4579 // sinkMBB: 4580 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4581 // ... 4582 BB = sinkMBB; 4583 BuildMI(BB, dl, TII->get(PPC::PHI), MI->getOperand(0).getReg()) 4584 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 4585 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4586 } 4587 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 4588 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 4589 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 4590 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 4591 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 4592 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 4593 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 4594 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 4595 4596 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 4597 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 4598 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 4599 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 4600 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 4601 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 4602 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 4603 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 4604 4605 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 4606 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 4607 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 4608 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 4609 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 4610 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 4611 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 4612 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 4613 4614 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 4615 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 4616 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 4617 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 4618 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 4619 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 4620 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 4621 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 4622 4623 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 4624 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 4625 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 4626 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 4627 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 4628 BB = EmitAtomicBinary(MI, BB, 
false, PPC::ANDC); 4629 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 4630 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 4631 4632 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 4633 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 4634 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 4635 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 4636 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 4637 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 4638 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 4639 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 4640 4641 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 4642 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 4643 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 4644 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 4645 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 4646 BB = EmitAtomicBinary(MI, BB, false, 0); 4647 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 4648 BB = EmitAtomicBinary(MI, BB, true, 0); 4649 4650 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 4651 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 4652 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 4653 4654 unsigned dest = MI->getOperand(0).getReg(); 4655 unsigned ptrA = MI->getOperand(1).getReg(); 4656 unsigned ptrB = MI->getOperand(2).getReg(); 4657 unsigned oldval = MI->getOperand(3).getReg(); 4658 unsigned newval = MI->getOperand(4).getReg(); 4659 DebugLoc dl = MI->getDebugLoc(); 4660 4661 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 4662 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 4663 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 4664 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4665 F->insert(It, loop1MBB); 4666 F->insert(It, loop2MBB); 4667 F->insert(It, midMBB); 4668 F->insert(It, exitMBB); 4669 exitMBB->transferSuccessors(BB); 4670 4671 // thisMBB: 4672 // ... 4673 // fallthrough --> loopMBB 4674 BB->addSuccessor(loop1MBB); 4675 4676 // loop1MBB: 4677 // l[wd]arx dest, ptr 4678 // cmp[wd] dest, oldval 4679 // bne- midMBB 4680 // loop2MBB: 4681 // st[wd]cx. newval, ptr 4682 // bne- loopMBB 4683 // b exitBB 4684 // midMBB: 4685 // st[wd]cx. dest, ptr 4686 // exitBB: 4687 BB = loop1MBB; 4688 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 4689 .addReg(ptrA).addReg(ptrB); 4690 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 4691 .addReg(oldval).addReg(dest); 4692 BuildMI(BB, dl, TII->get(PPC::BCC)) 4693 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 4694 BB->addSuccessor(loop2MBB); 4695 BB->addSuccessor(midMBB); 4696 4697 BB = loop2MBB; 4698 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4699 .addReg(newval).addReg(ptrA).addReg(ptrB); 4700 BuildMI(BB, dl, TII->get(PPC::BCC)) 4701 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 4702 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 4703 BB->addSuccessor(loop1MBB); 4704 BB->addSuccessor(exitMBB); 4705 4706 BB = midMBB; 4707 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4708 .addReg(dest).addReg(ptrA).addReg(ptrB); 4709 BB->addSuccessor(exitMBB); 4710 4711 // exitMBB: 4712 // ... 4713 BB = exitMBB; 4714 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 4715 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 4716 // We must use 64-bit registers for addresses when targeting 64-bit, 4717 // since we're actually doing arithmetic on them. Other registers 4718 // can be 32-bit. 
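// As a concrete illustration of the shift/mask arithmetic used below (the same
// scheme as in EmitPartwordAtomicBinary above): for an 8-bit operand at an
// address with low bits 0b01, rlwinm produces shift1 = 8, xori 24 gives
// shift = 16, and shifting the 0xFF mask left by 16 yields 0x00FF0000 --
// exactly the bits that byte occupies within the big-endian aligned word
// loaded by lwarx.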
4719 bool is64bit = PPCSubTarget.isPPC64(); 4720 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 4721 4722 unsigned dest = MI->getOperand(0).getReg(); 4723 unsigned ptrA = MI->getOperand(1).getReg(); 4724 unsigned ptrB = MI->getOperand(2).getReg(); 4725 unsigned oldval = MI->getOperand(3).getReg(); 4726 unsigned newval = MI->getOperand(4).getReg(); 4727 DebugLoc dl = MI->getDebugLoc(); 4728 4729 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 4730 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 4731 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 4732 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4733 F->insert(It, loop1MBB); 4734 F->insert(It, loop2MBB); 4735 F->insert(It, midMBB); 4736 F->insert(It, exitMBB); 4737 exitMBB->transferSuccessors(BB); 4738 4739 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4740 const TargetRegisterClass *RC = 4741 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 4742 (const TargetRegisterClass *) &PPC::GPRCRegClass; 4743 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 4744 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 4745 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 4746 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 4747 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 4748 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 4749 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 4750 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 4751 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 4752 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 4753 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 4754 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 4755 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 4756 unsigned Ptr1Reg; 4757 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 4758 // thisMBB: 4759 // ... 4760 // fallthrough --> loopMBB 4761 BB->addSuccessor(loop1MBB); 4762 4763 // The 4-byte load must be aligned, while a char or short may be 4764 // anywhere in the word. Hence all this nasty bookkeeping code. 4765 // add ptr1, ptrA, ptrB [copy if ptrA==0] 4766 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 4767 // xori shift, shift1, 24 [16] 4768 // rlwinm ptr, ptr1, 0, 0, 29 4769 // slw newval2, newval, shift 4770 // slw oldval2, oldval,shift 4771 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 4772 // slw mask, mask2, shift 4773 // and newval3, newval2, mask 4774 // and oldval3, oldval2, mask 4775 // loop1MBB: 4776 // lwarx tmpDest, ptr 4777 // and tmp, tmpDest, mask 4778 // cmpw tmp, oldval3 4779 // bne- midMBB 4780 // loop2MBB: 4781 // andc tmp2, tmpDest, mask 4782 // or tmp4, tmp2, newval3 4783 // stwcx. tmp4, ptr 4784 // bne- loop1MBB 4785 // b exitBB 4786 // midMBB: 4787 // stwcx. tmpDest, ptr 4788 // exitBB: 4789 // srw dest, tmpDest, shift 4790 if (ptrA!=PPC::R0) { 4791 Ptr1Reg = RegInfo.createVirtualRegister(RC); 4792 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 4793 .addReg(ptrA).addReg(ptrB); 4794 } else { 4795 Ptr1Reg = ptrB; 4796 } 4797 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 4798 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 4799 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 4800 .addReg(Shift1Reg).addImm(is8bit ? 
24 : 16); 4801 if (is64bit) 4802 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 4803 .addReg(Ptr1Reg).addImm(0).addImm(61); 4804 else 4805 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 4806 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 4807 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 4808 .addReg(newval).addReg(ShiftReg); 4809 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 4810 .addReg(oldval).addReg(ShiftReg); 4811 if (is8bit) 4812 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 4813 else { 4814 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 4815 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 4816 .addReg(Mask3Reg).addImm(65535); 4817 } 4818 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 4819 .addReg(Mask2Reg).addReg(ShiftReg); 4820 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 4821 .addReg(NewVal2Reg).addReg(MaskReg); 4822 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 4823 .addReg(OldVal2Reg).addReg(MaskReg); 4824 4825 BB = loop1MBB; 4826 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 4827 .addReg(PPC::R0).addReg(PtrReg); 4828 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 4829 .addReg(TmpDestReg).addReg(MaskReg); 4830 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 4831 .addReg(TmpReg).addReg(OldVal3Reg); 4832 BuildMI(BB, dl, TII->get(PPC::BCC)) 4833 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 4834 BB->addSuccessor(loop2MBB); 4835 BB->addSuccessor(midMBB); 4836 4837 BB = loop2MBB; 4838 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 4839 .addReg(TmpDestReg).addReg(MaskReg); 4840 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 4841 .addReg(Tmp2Reg).addReg(NewVal3Reg); 4842 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 4843 .addReg(PPC::R0).addReg(PtrReg); 4844 BuildMI(BB, dl, TII->get(PPC::BCC)) 4845 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 4846 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 4847 BB->addSuccessor(loop1MBB); 4848 BB->addSuccessor(exitMBB); 4849 4850 BB = midMBB; 4851 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 4852 .addReg(PPC::R0).addReg(PtrReg); 4853 BB->addSuccessor(exitMBB); 4854 4855 // exitMBB: 4856 // ... 4857 BB = exitMBB; 4858 BuildMI(BB, dl, TII->get(PPC::SRW),dest).addReg(TmpReg).addReg(ShiftReg); 4859 } else { 4860 llvm_unreachable("Unexpected instr type to insert"); 4861 } 4862 4863 F->DeleteMachineInstr(MI); // The pseudo instruction is gone now. 4864 return BB; 4865} 4866 4867//===----------------------------------------------------------------------===// 4868// Target Optimization Hooks 4869//===----------------------------------------------------------------------===// 4870 4871SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 4872 DAGCombinerInfo &DCI) const { 4873 TargetMachine &TM = getTargetMachine(); 4874 SelectionDAG &DAG = DCI.DAG; 4875 DebugLoc dl = N->getDebugLoc(); 4876 switch (N->getOpcode()) { 4877 default: break; 4878 case PPCISD::SHL: 4879 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 4880 if (C->getZExtValue() == 0) // 0 << V -> 0. 4881 return N->getOperand(0); 4882 } 4883 break; 4884 case PPCISD::SRL: 4885 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 4886 if (C->getZExtValue() == 0) // 0 >>u V -> 0. 4887 return N->getOperand(0); 4888 } 4889 break; 4890 case PPCISD::SRA: 4891 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 4892 if (C->getZExtValue() == 0 || // 0 >>s V -> 0. 4893 C->isAllOnesValue()) // -1 >>s V -> -1. 
4894 return N->getOperand(0); 4895 } 4896 break; 4897 4898 case ISD::SINT_TO_FP: 4899 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 4900 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 4901 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 4902 // We allow the src/dst to be either f32/f64, but the intermediate 4903 // type must be i64. 4904 if (N->getOperand(0).getValueType() == MVT::i64 && 4905 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 4906 SDValue Val = N->getOperand(0).getOperand(0); 4907 if (Val.getValueType() == MVT::f32) { 4908 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 4909 DCI.AddToWorklist(Val.getNode()); 4910 } 4911 4912 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val); 4913 DCI.AddToWorklist(Val.getNode()); 4914 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val); 4915 DCI.AddToWorklist(Val.getNode()); 4916 if (N->getValueType(0) == MVT::f32) { 4917 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 4918 DAG.getIntPtrConstant(0)); 4919 DCI.AddToWorklist(Val.getNode()); 4920 } 4921 return Val; 4922 } else if (N->getOperand(0).getValueType() == MVT::i32) { 4923 // If the intermediate type is i32, we can avoid the load/store here 4924 // too. 4925 } 4926 } 4927 } 4928 break; 4929 case ISD::STORE: 4930 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 4931 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 4932 !cast<StoreSDNode>(N)->isTruncatingStore() && 4933 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 4934 N->getOperand(1).getValueType() == MVT::i32 && 4935 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 4936 SDValue Val = N->getOperand(1).getOperand(0); 4937 if (Val.getValueType() == MVT::f32) { 4938 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 4939 DCI.AddToWorklist(Val.getNode()); 4940 } 4941 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 4942 DCI.AddToWorklist(Val.getNode()); 4943 4944 Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val, 4945 N->getOperand(2), N->getOperand(3)); 4946 DCI.AddToWorklist(Val.getNode()); 4947 return Val; 4948 } 4949 4950 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 4951 if (N->getOperand(1).getOpcode() == ISD::BSWAP && 4952 N->getOperand(1).getNode()->hasOneUse() && 4953 (N->getOperand(1).getValueType() == MVT::i32 || 4954 N->getOperand(1).getValueType() == MVT::i16)) { 4955 SDValue BSwapOp = N->getOperand(1).getOperand(0); 4956 // Do an any-extend to 32-bits if this is a half-word input. 4957 if (BSwapOp.getValueType() == MVT::i16) 4958 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 4959 4960 return DAG.getNode(PPCISD::STBRX, dl, MVT::Other, N->getOperand(0), 4961 BSwapOp, N->getOperand(2), N->getOperand(3), 4962 DAG.getValueType(N->getOperand(1).getValueType())); 4963 } 4964 break; 4965 case ISD::BSWAP: 4966 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 4967 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 4968 N->getOperand(0).hasOneUse() && 4969 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { 4970 SDValue Load = N->getOperand(0); 4971 LoadSDNode *LD = cast<LoadSDNode>(Load); 4972 // Create the byte-swapping load. 
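// (PPCISD::LBRX is selected to lhbrx/lwbrx, which load and byte-reverse in a
// single instruction; the trailing VT operand records the original load width,
// i16 or i32, so the correct form can be chosen during selection.)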
4973 std::vector<EVT> VTs; 4974 VTs.push_back(MVT::i32); 4975 VTs.push_back(MVT::Other); 4976 SDValue MO = DAG.getMemOperand(LD->getMemOperand()); 4977 SDValue Ops[] = { 4978 LD->getChain(), // Chain 4979 LD->getBasePtr(), // Ptr 4980 MO, // MemOperand 4981 DAG.getValueType(N->getValueType(0)) // VT 4982 }; 4983 SDValue BSLoad = DAG.getNode(PPCISD::LBRX, dl, VTs, Ops, 4); 4984 4985 // If this is an i16 load, insert the truncate. 4986 SDValue ResVal = BSLoad; 4987 if (N->getValueType(0) == MVT::i16) 4988 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 4989 4990 // First, combine the bswap away. This makes the value produced by the 4991 // load dead. 4992 DCI.CombineTo(N, ResVal); 4993 4994 // Next, combine the load away, we give it a bogus result value but a real 4995 // chain result. The result value is dead because the bswap is dead. 4996 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 4997 4998 // Return N so it doesn't get rechecked! 4999 return SDValue(N, 0); 5000 } 5001 5002 break; 5003 case PPCISD::VCMP: { 5004 // If a VCMPo node already exists with exactly the same operands as this 5005 // node, use its result instead of this node (VCMPo computes both a CR6 and 5006 // a normal output). 5007 // 5008 if (!N->getOperand(0).hasOneUse() && 5009 !N->getOperand(1).hasOneUse() && 5010 !N->getOperand(2).hasOneUse()) { 5011 5012 // Scan all of the users of the LHS, looking for VCMPo's that match. 5013 SDNode *VCMPoNode = 0; 5014 5015 SDNode *LHSN = N->getOperand(0).getNode(); 5016 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 5017 UI != E; ++UI) 5018 if (UI->getOpcode() == PPCISD::VCMPo && 5019 UI->getOperand(1) == N->getOperand(1) && 5020 UI->getOperand(2) == N->getOperand(2) && 5021 UI->getOperand(0) == N->getOperand(0)) { 5022 VCMPoNode = *UI; 5023 break; 5024 } 5025 5026 // If there is no VCMPo node, or if the flag value has a single use, don't 5027 // transform this. 5028 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 5029 break; 5030 5031 // Look at the (necessarily single) use of the flag value. If it has a 5032 // chain, this transformation is more complex. Note that multiple things 5033 // could use the value result, which we should ignore. 5034 SDNode *FlagUser = 0; 5035 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 5036 FlagUser == 0; ++UI) { 5037 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 5038 SDNode *User = *UI; 5039 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 5040 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 5041 FlagUser = User; 5042 break; 5043 } 5044 } 5045 } 5046 5047 // If the user is a MFCR instruction, we know this is safe. Otherwise we 5048 // give up for right now. 5049 if (FlagUser->getOpcode() == PPCISD::MFCR) 5050 return SDValue(VCMPoNode, 0); 5051 } 5052 break; 5053 } 5054 case ISD::BR_CC: { 5055 // If this is a branch on an altivec predicate comparison, lower this so 5056 // that we don't have to do a MFCR: instead, branch directly on CR6. This 5057 // lowering is done pre-legalize, because the legalizer lowers the predicate 5058 // compare down to code that is difficult to reassemble. 
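// For example, a branch on the result of the llvm.ppc.altivec.vcmpeqfp.p
// predicate intrinsic can become a single vcmpeqfp. (the record form, which
// sets CR6) followed by a conditional branch on the relevant CR6 bit, with no
// intervening mfcr.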
5059 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 5060 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 5061 int CompareOpc; 5062 bool isDot; 5063 5064 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 5065 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 5066 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 5067 assert(isDot && "Can't compare against a vector result!"); 5068 5069 // If this is a comparison against something other than 0/1, then we know 5070 // that the condition is never/always true. 5071 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 5072 if (Val != 0 && Val != 1) { 5073 if (CC == ISD::SETEQ) // Cond never true, remove branch. 5074 return N->getOperand(0); 5075 // Always !=, turn it into an unconditional branch. 5076 return DAG.getNode(ISD::BR, dl, MVT::Other, 5077 N->getOperand(0), N->getOperand(4)); 5078 } 5079 5080 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 5081 5082 // Create the PPCISD altivec 'dot' comparison node. 5083 std::vector<EVT> VTs; 5084 SDValue Ops[] = { 5085 LHS.getOperand(2), // LHS of compare 5086 LHS.getOperand(3), // RHS of compare 5087 DAG.getConstant(CompareOpc, MVT::i32) 5088 }; 5089 VTs.push_back(LHS.getOperand(2).getValueType()); 5090 VTs.push_back(MVT::Flag); 5091 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 5092 5093 // Unpack the result based on how the target uses it. 5094 PPC::Predicate CompOpc; 5095 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 5096 default: // Can't happen, don't crash on invalid number though. 5097 case 0: // Branch on the value of the EQ bit of CR6. 5098 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 5099 break; 5100 case 1: // Branch on the inverted value of the EQ bit of CR6. 5101 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 5102 break; 5103 case 2: // Branch on the value of the LT bit of CR6. 5104 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 5105 break; 5106 case 3: // Branch on the inverted value of the LT bit of CR6. 5107 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 5108 break; 5109 } 5110 5111 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 5112 DAG.getConstant(CompOpc, MVT::i32), 5113 DAG.getRegister(PPC::CR6, MVT::i32), 5114 N->getOperand(4), CompNode.getValue(1)); 5115 } 5116 break; 5117 } 5118 } 5119 5120 return SDValue(); 5121} 5122 5123//===----------------------------------------------------------------------===// 5124// Inline Assembly Support 5125//===----------------------------------------------------------------------===// 5126 5127void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 5128 const APInt &Mask, 5129 APInt &KnownZero, 5130 APInt &KnownOne, 5131 const SelectionDAG &DAG, 5132 unsigned Depth) const { 5133 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 5134 switch (Op.getOpcode()) { 5135 default: break; 5136 case PPCISD::LBRX: { 5137 // lhbrx is known to have the top bits cleared out. 
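// (Only the halfword form tells us anything here: lhbrx zero-extends its
// result, so the upper 16 bits are known zero -- hence the 0xFFFF0000 below.
// The word form, lwbrx, produces 32 significant bits, so nothing is recorded
// for it.)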
5138 if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16) 5139 KnownZero = 0xFFFF0000; 5140 break; 5141 } 5142 case ISD::INTRINSIC_WO_CHAIN: { 5143 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 5144 default: break; 5145 case Intrinsic::ppc_altivec_vcmpbfp_p: 5146 case Intrinsic::ppc_altivec_vcmpeqfp_p: 5147 case Intrinsic::ppc_altivec_vcmpequb_p: 5148 case Intrinsic::ppc_altivec_vcmpequh_p: 5149 case Intrinsic::ppc_altivec_vcmpequw_p: 5150 case Intrinsic::ppc_altivec_vcmpgefp_p: 5151 case Intrinsic::ppc_altivec_vcmpgtfp_p: 5152 case Intrinsic::ppc_altivec_vcmpgtsb_p: 5153 case Intrinsic::ppc_altivec_vcmpgtsh_p: 5154 case Intrinsic::ppc_altivec_vcmpgtsw_p: 5155 case Intrinsic::ppc_altivec_vcmpgtub_p: 5156 case Intrinsic::ppc_altivec_vcmpgtuh_p: 5157 case Intrinsic::ppc_altivec_vcmpgtuw_p: 5158 KnownZero = ~1U; // All bits but the low one are known to be zero. 5159 break; 5160 } 5161 } 5162 } 5163} 5164 5165 5166/// getConstraintType - Given a constraint, return the type of 5167/// constraint it is for this target. 5168PPCTargetLowering::ConstraintType 5169PPCTargetLowering::getConstraintType(const std::string &Constraint) const { 5170 if (Constraint.size() == 1) { 5171 switch (Constraint[0]) { 5172 default: break; 5173 case 'b': 5174 case 'r': 5175 case 'f': 5176 case 'v': 5177 case 'y': 5178 return C_RegisterClass; 5179 } 5180 } 5181 return TargetLowering::getConstraintType(Constraint); 5182} 5183 5184std::pair<unsigned, const TargetRegisterClass*> 5185PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 5186 EVT VT) const { 5187 if (Constraint.size() == 1) { 5188 // GCC RS6000 Constraint Letters 5189 switch (Constraint[0]) { 5190 case 'b': // R1-R31 5191 case 'r': // R0-R31 5192 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) 5193 return std::make_pair(0U, PPC::G8RCRegisterClass); 5194 return std::make_pair(0U, PPC::GPRCRegisterClass); 5195 case 'f': 5196 if (VT == MVT::f32) 5197 return std::make_pair(0U, PPC::F4RCRegisterClass); 5198 else if (VT == MVT::f64) 5199 return std::make_pair(0U, PPC::F8RCRegisterClass); 5200 break; 5201 case 'v': 5202 return std::make_pair(0U, PPC::VRRCRegisterClass); 5203 case 'y': // crrc 5204 return std::make_pair(0U, PPC::CRRCRegisterClass); 5205 } 5206 } 5207 5208 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 5209} 5210 5211 5212/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5213/// vector. If it is invalid, don't add anything to Ops. If hasMemory is true 5214/// it means one of the asm constraint of the inline asm instruction being 5215/// processed is 'm'. 5216void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter, 5217 bool hasMemory, 5218 std::vector<SDValue>&Ops, 5219 SelectionDAG &DAG) const { 5220 SDValue Result(0,0); 5221 switch (Letter) { 5222 default: break; 5223 case 'I': 5224 case 'J': 5225 case 'K': 5226 case 'L': 5227 case 'M': 5228 case 'N': 5229 case 'O': 5230 case 'P': { 5231 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 5232 if (!CST) return; // Must be an immediate to match. 5233 unsigned Value = CST->getZExtValue(); 5234 switch (Letter) { 5235 default: llvm_unreachable("Unknown constraint letter!"); 5236 case 'I': // "I" is a signed 16-bit constant. 5237 if ((short)Value == (int)Value) 5238 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5239 break; 5240 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 5241 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 
5242 if ((short)Value == 0) 5243 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5244 break; 5245 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 5246 if ((Value >> 16) == 0) 5247 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5248 break; 5249 case 'M': // "M" is a constant that is greater than 31. 5250 if (Value > 31) 5251 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5252 break; 5253 case 'N': // "N" is a positive constant that is an exact power of two. 5254 if ((int)Value > 0 && isPowerOf2_32(Value)) 5255 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5256 break; 5257 case 'O': // "O" is the constant zero. 5258 if (Value == 0) 5259 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5260 break; 5261 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 5262 if ((short)-Value == (int)-Value) 5263 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5264 break; 5265 } 5266 break; 5267 } 5268 } 5269 5270 if (Result.getNode()) { 5271 Ops.push_back(Result); 5272 return; 5273 } 5274 5275 // Handle standard constraint letters. 5276 TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG); 5277} 5278 5279// isLegalAddressingMode - Return true if the addressing mode represented 5280// by AM is legal for this target, for a load/store of the specified type. 5281bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM, 5282 const Type *Ty) const { 5283 // FIXME: PPC does not allow r+i addressing modes for vectors! 5284 5285 // PPC allows a sign-extended 16-bit immediate field. 5286 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 5287 return false; 5288 5289 // No global is ever allowed as a base. 5290 if (AM.BaseGV) 5291 return false; 5292 5293 // PPC only support r+r, 5294 switch (AM.Scale) { 5295 case 0: // "r+i" or just "i", depending on HasBaseReg. 5296 break; 5297 case 1: 5298 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 5299 return false; 5300 // Otherwise we have r+r or r+i. 5301 break; 5302 case 2: 5303 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 5304 return false; 5305 // Allow 2*r as r+r. 5306 break; 5307 default: 5308 // No other scales are supported. 5309 return false; 5310 } 5311 5312 return true; 5313} 5314 5315/// isLegalAddressImmediate - Return true if the integer value can be used 5316/// as the offset of the target addressing mode for load / store of the 5317/// given type. 5318bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,const Type *Ty) const{ 5319 // PPC allows a sign-extended 16-bit immediate field. 5320 return (V > -(1 << 16) && V < (1 << 16)-1); 5321} 5322 5323bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const { 5324 return false; 5325} 5326 5327SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) { 5328 DebugLoc dl = Op.getDebugLoc(); 5329 // Depths > 0 not supported yet! 5330 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) 5331 return SDValue(); 5332 5333 MachineFunction &MF = DAG.getMachineFunction(); 5334 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 5335 5336 // Just load the return address off the stack. 5337 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 5338 5339 // Make sure the function really does not optimize away the store of the RA 5340 // to the stack. 
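// (On PowerPC the return address lives in the link register; the frame index
// returned by getReturnAddrFrameIndex is the slot LR is spilled to, and
// setLRStoreRequired forces the prologue to perform that spill even if nothing
// else in the function would need it.)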
5341 FuncInfo->setLRStoreRequired(); 5342 return DAG.getLoad(getPointerTy(), dl, 5343 DAG.getEntryNode(), RetAddrFI, NULL, 0); 5344} 5345 5346SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { 5347 DebugLoc dl = Op.getDebugLoc(); 5348 // Depths > 0 not supported yet! 5349 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) 5350 return SDValue(); 5351 5352 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 5353 bool isPPC64 = PtrVT == MVT::i64; 5354 5355 MachineFunction &MF = DAG.getMachineFunction(); 5356 MachineFrameInfo *MFI = MF.getFrameInfo(); 5357 bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects()) 5358 && MFI->getStackSize(); 5359 5360 if (isPPC64) 5361 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::X31 : PPC::X1, 5362 MVT::i64); 5363 else 5364 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::R31 : PPC::R1, 5365 MVT::i32); 5366} 5367 5368bool 5369PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 5370 // The PowerPC target isn't yet aware of offsets. 5371 return false; 5372} 5373 5374EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align, 5375 bool isSrcConst, bool isSrcStr, 5376 SelectionDAG &DAG) const { 5377 if (this->PPCSubTarget.isPPC64()) { 5378 return MVT::i64; 5379 } else { 5380 return MVT::i32; 5381 } 5382} 5383