PPCISelLowering.cpp revision f0757b0edc1ef3d1998485d3f74cadaa3f7180a0
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                     CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags,
                                     CCState &State);
static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT,
                                            EVT &LocVT,
                                            CCValAssign::LocInfo &LocInfo,
                                            ISD::ArgFlagsTy &ArgFlags,
                                            CCState &State);
static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT,
                                              EVT &LocVT,
                                              CCValAssign::LocInfo &LocInfo,
                                              ISD::ArgFlagsTy &ArgFlags,
                                              CCState &State);

static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
  cl::desc("enable preincrement load/store generation on PPC (experimental)"),
  cl::Hidden);

static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
  if (TM.getSubtargetImpl()->isDarwin())
    return new TargetLoweringObjectFileMachO();

  return new TargetLoweringObjectFileELF();
}

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 SEXTLOAD, but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
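  // (These are the update-form memory ops, e.g. lbzu/lwzu/stwu, which write
  // the effective address back into the base register. Whether we actually
  // form them is decided by getPreIndexedAddressParts below, which is gated
  // behind the -enable-ppc-preinc flag declared above.)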
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);


  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // VAARG is custom lowered with the 32-bit SVR4 ABI.
  if (TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
      && !TM.getSubtarget<PPCSubtarget>().isPPC64())
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
  else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
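  // (For example, SETULT means "unordered or less than": a single fcmpu sets
  // exactly one of the LT/GT/EQ/UN bits in a CR field, so these predicates
  // need two bits checked and are expanded into two comparisons.)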
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    // FIXME: disable this lowered code. This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls. We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
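      // (Promoting a vector operation to another type of the same width is
      // effectively a bitcast: e.g. an AND of v16i8 operands is performed as a
      // v4i32 AND and the result is bitcast back to v16i8.)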
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setShiftAmountType(MVT::i32);
  setBooleanContents(ZeroOrOneBooleanContent);

  if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  // Darwin long double math library functions have $LDBL128 appended.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  computeRegisterProperties();
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const {
  const TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on 4 byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;
  // FIXME SVR4 TBD
  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:     return "PPCISD::TOC_ENTRY";
  case PPCISD::TOC_RESTORE:   return "PPCISD::TOC_RESTORE";
  case PPCISD::LOAD:          return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC:      return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC:      return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL_SVR4:     return "PPCISD::CALL_SVR4";
  case PPCISD::CALL_Darwin:   return "PPCISD::CALL_Darwin";
  case PPCISD::NOP:           return "PPCISD::NOP";
  case PPCISD::MTCTR:         return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Darwin:  return "PPCISD::BCTRL_Darwin";
  case PPCISD::BCTRL_SVR4:    return "PPCISD::BCTRL_SVR4";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  case PPCISD::LBRX:          return "PPCISD::LBRX";
  case PPCISD::STBRX:         return "PPCISD::STBRX";
  case PPCISD::LARX:          return "PPCISD::LARX";
  case PPCISD::STCX:          return "PPCISD::STCX";
  case PPCISD::COND_BRANCH:   return "PPCISD::COND_BRANCH";
  case PPCISD::MFFS:          return "PPCISD::MFFS";
  case PPCISD::MTFSB0:        return "PPCISD::MTFSB0";
  case PPCISD::MTFSB1:        return "PPCISD::MTFSB1";
  case PPCISD::FADDRTZ:       return "PPCISD::FADDRTZ";
  case PPCISD::MTFSF:         return "PPCISD::MTFSF";
  case PPCISD::TC_RETURN:     return "PPCISD::TC_RETURN";
  }
}
MVT::SimpleValueType PPCTargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i32;
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned PPCTargetLowering::getFunctionAlignment(const Function *F) const {
  if (getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin())
    return F->hasFnAttr(Attribute::OptimizeForSize) ? 2 : 4;
  else
    return 2;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
527/// 528static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 529 unsigned LHSStart, unsigned RHSStart) { 530 assert(N->getValueType(0) == MVT::v16i8 && 531 "PPC only supports shuffles by bytes!"); 532 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 533 "Unsupported merge size!"); 534 535 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 536 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 537 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 538 LHSStart+j+i*UnitSize) || 539 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 540 RHSStart+j+i*UnitSize)) 541 return false; 542 } 543 return true; 544} 545 546/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 547/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). 548bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 549 bool isUnary) { 550 if (!isUnary) 551 return isVMerge(N, UnitSize, 8, 24); 552 return isVMerge(N, UnitSize, 8, 8); 553} 554 555/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 556/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). 557bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 558 bool isUnary) { 559 if (!isUnary) 560 return isVMerge(N, UnitSize, 0, 16); 561 return isVMerge(N, UnitSize, 0, 0); 562} 563 564 565/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 566/// amount, otherwise return -1. 567int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) { 568 assert(N->getValueType(0) == MVT::v16i8 && 569 "PPC only supports shuffles by bytes!"); 570 571 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 572 573 // Find the first non-undef value in the shuffle mask. 574 unsigned i; 575 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 576 /*search*/; 577 578 if (i == 16) return -1; // all undef. 579 580 // Otherwise, check to see if the rest of the elements are consecutively 581 // numbered from this value. 582 unsigned ShiftAmt = SVOp->getMaskElt(i); 583 if (ShiftAmt < i) return -1; 584 ShiftAmt -= i; 585 586 if (!isUnary) { 587 // Check the rest of the elements to see if they are consecutive. 588 for (++i; i != 16; ++i) 589 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 590 return -1; 591 } else { 592 // Check the rest of the elements to see if they are consecutive. 593 for (++i; i != 16; ++i) 594 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 595 return -1; 596 } 597 return ShiftAmt; 598} 599 600/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 601/// specifies a splat of a single element that is suitable for input to 602/// VSPLTB/VSPLTH/VSPLTW. 603bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 604 assert(N->getValueType(0) == MVT::v16i8 && 605 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 606 607 // This is a splat operation if each element of the permute is the same, and 608 // if the value doesn't reference the second vector. 609 unsigned ElementBase = N->getMaskElt(0); 610 611 // FIXME: Handle UNDEF elements too! 612 if (ElementBase >= 16) 613 return false; 614 615 // Check that the indices are consecutive, in the case of a multi-byte element 616 // splatted with a v16i8 mask. 
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;  // Number of BV entries per splat value.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();


      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case where the replicated bits would fit in our
  // immediate field is zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal. Continue doing this until we
  // get to ByteSize. This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}


/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i; fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          APInt::getAllOnesValue(N.getOperand(0)
                                                 .getValueSizeInBits()),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            APInt::getAllOnesValue(N.getOperand(1)
                                                   .getValueSizeInBits()),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG) const {
  // FIXME dl should come from parent load or store, not from address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0"
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPC::R0, N.getValueType());
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4]. Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
                                                 SDValue &Base,
                                                 SelectionDAG &DAG) const {
  // FIXME dl should come from the parent load or store, not the address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address. Verify low two bits are clear.
    if ((CN->getZExtValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field, codegen
      // this as "d, 0"
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
        int Addr = (int)CN->getZExtValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ?
                         PPC::LIS : PPC::LIS8;
        Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base),0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}


/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();

  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    ST = ST;
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();
  SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there isn't really any debug info here
  DebugLoc dl = Op.getDebugLoc();

  const TargetMachine &TM = DAG.getTarget();

  SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, CPI, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, CPI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg,
                                 DebugLoc(), PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  return Lo;
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there isn't really any debug loc here
  DebugLoc dl = Op.getDebugLoc();

  const TargetMachine &TM = DAG.getTarget();

  SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, JTI, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, JTI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg,
                                 DebugLoc(), PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  return Lo;
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  llvm_unreachable("TLS not implemented for PPC.");
  return SDValue(); // Not reached
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  DebugLoc DL = Op.getDebugLoc();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue TgtBA = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, TgtBA, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, TgtBA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  const TargetMachine &TM = DAG.getTarget();
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg,
                                 DebugLoc(), PtrVT), Hi);
  }

  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GSDN->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there isn't really any debug info here
  DebugLoc dl = GSDN->getDebugLoc();

  const TargetMachine &TM = DAG.getTarget();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    return DAG.getNode(PPCISD::TOC_ENTRY, dl, MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, GA, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, GA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg,
                                 DebugLoc(), PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);

  if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Lo, NULL, 0,
                     false, false, 0);
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  DebugLoc dl = Op.getDebugLoc();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized. FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit. The
  // normal approach here uses sub to do this instead of xor. Using xor exposes
  // the result to other bit-twiddling opportunities.
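  // In other words, (seteq a, b) becomes (seteq (xor a, b), 0), and likewise
  // for setne.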
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget) const {

  llvm_unreachable("VAARG not yet implemented for the SVR4 ABI!");
  return SDValue(); // Not reached
}

SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  DebugLoc dl = Op.getDebugLoc();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = (PtrVT == MVT::i64);
  const Type *IntPtrTy =
    DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType(
                                                             *DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  std::pair<SDValue, SDValue> CallResult =
    LowerCallTo(Chain, Op.getValueType().getTypeForEVT(*DAG.getContext()),
                false, false, false, false, 0, CallingConv::C, false,
                /*isReturnValueUsed=*/true,
                DAG.getExternalSymbol("__trampoline_setup", PtrVT),
                Args, DAG, dl);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
                                        const PPCSubtarget &Subtarget) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  DebugLoc dl = Op.getDebugLoc();

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
                        false, false, 0);
  }

  // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];


  SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32);
  SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32);


  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
                                         Op.getOperand(1), SV, 0, MVT::i8,
                                         false, false, 0);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte : number of float regs
  SDValue secondStore =
    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset, MVT::i8,
                      false, false, 0);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDValue thirdStore =
    DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, SV, nextOffset,
                 false, false, 0);
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr, SV, nextOffset,
                      false, false, 0);

}

#include "PPCGenCallingConv.inc"

static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                     CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags,
                                     CCState &State) {
  return true;
}

static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT,
                                            EVT &LocVT,
                                            CCValAssign::LocInfo &LocInfo,
                                            ISD::ArgFlagsTy &ArgFlags,
                                            CCState &State) {
  static const unsigned ArgRegs[] = {
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  const unsigned NumArgRegs = array_lengthof(ArgRegs);

  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);

  // Skip one register if the first unallocated register has an even register
  // number and there are still argument registers available which have not been
  // allocated yet. RegNum is actually an index into ArgRegs, which means we
  // need to skip a register if RegNum is odd.
  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
    State.AllocateReg(ArgRegs[RegNum]);
  }

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an odd register number and does not actually
  // allocate a register for the current argument.
1468 return false; 1469} 1470 1471static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT, 1472 EVT &LocVT, 1473 CCValAssign::LocInfo &LocInfo, 1474 ISD::ArgFlagsTy &ArgFlags, 1475 CCState &State) { 1476 static const unsigned ArgRegs[] = { 1477 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1478 PPC::F8 1479 }; 1480 1481 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1482 1483 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1484 1485 // If there is only one Floating-point register left we need to put both f64 1486 // values of a split ppc_fp128 value on the stack. 1487 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 1488 State.AllocateReg(ArgRegs[RegNum]); 1489 } 1490 1491 // Always return false here, as this function only makes sure that the two f64 1492 // values a ppc_fp128 value is split into are both passed in registers or both 1493 // passed on the stack and does not actually allocate a register for the 1494 // current argument. 1495 return false; 1496} 1497 1498/// GetFPR - Get the set of FP registers that should be allocated for arguments, 1499/// on Darwin. 1500static const unsigned *GetFPR() { 1501 static const unsigned FPR[] = { 1502 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1503 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1504 }; 1505 1506 return FPR; 1507} 1508 1509/// CalculateStackSlotSize - Calculates the size reserved for this argument on 1510/// the stack. 1511static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 1512 unsigned PtrByteSize) { 1513 unsigned ArgSize = ArgVT.getSizeInBits()/8; 1514 if (Flags.isByVal()) 1515 ArgSize = Flags.getByValSize(); 1516 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1517 1518 return ArgSize; 1519} 1520 1521SDValue 1522PPCTargetLowering::LowerFormalArguments(SDValue Chain, 1523 CallingConv::ID CallConv, bool isVarArg, 1524 const SmallVectorImpl<ISD::InputArg> 1525 &Ins, 1526 DebugLoc dl, SelectionDAG &DAG, 1527 SmallVectorImpl<SDValue> &InVals) 1528 const { 1529 if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) { 1530 return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins, 1531 dl, DAG, InVals); 1532 } else { 1533 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 1534 dl, DAG, InVals); 1535 } 1536} 1537 1538SDValue 1539PPCTargetLowering::LowerFormalArguments_SVR4( 1540 SDValue Chain, 1541 CallingConv::ID CallConv, bool isVarArg, 1542 const SmallVectorImpl<ISD::InputArg> 1543 &Ins, 1544 DebugLoc dl, SelectionDAG &DAG, 1545 SmallVectorImpl<SDValue> &InVals) const { 1546 1547 // 32-bit SVR4 ABI Stack Frame Layout: 1548 // +-----------------------------------+ 1549 // +--> | Back chain | 1550 // | +-----------------------------------+ 1551 // | | Floating-point register save area | 1552 // | +-----------------------------------+ 1553 // | | General register save area | 1554 // | +-----------------------------------+ 1555 // | | CR save word | 1556 // | +-----------------------------------+ 1557 // | | VRSAVE save word | 1558 // | +-----------------------------------+ 1559 // | | Alignment padding | 1560 // | +-----------------------------------+ 1561 // | | Vector register save area | 1562 // | +-----------------------------------+ 1563 // | | Local variable space | 1564 // | +-----------------------------------+ 1565 // | | Parameter list area | 1566 // | +-----------------------------------+ 1567 // | | LR save word | 1568 // | +-----------------------------------+ 
1569 // SP--> +--- | Back chain | 1570 // +-----------------------------------+ 1571 // 1572 // Specifications: 1573 // System V Application Binary Interface PowerPC Processor Supplement 1574 // AltiVec Technology Programming Interface Manual 1575 1576 MachineFunction &MF = DAG.getMachineFunction(); 1577 MachineFrameInfo *MFI = MF.getFrameInfo(); 1578 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1579 1580 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1581 // Potential tail calls could cause overwriting of argument stack slots. 1582 bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast)); 1583 unsigned PtrByteSize = 4; 1584 1585 // Assign locations to all of the incoming arguments. 1586 SmallVector<CCValAssign, 16> ArgLocs; 1587 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, 1588 *DAG.getContext()); 1589 1590 // Reserve space for the linkage area on the stack. 1591 CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize); 1592 1593 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4); 1594 1595 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1596 CCValAssign &VA = ArgLocs[i]; 1597 1598 // Arguments stored in registers. 1599 if (VA.isRegLoc()) { 1600 TargetRegisterClass *RC; 1601 EVT ValVT = VA.getValVT(); 1602 1603 switch (ValVT.getSimpleVT().SimpleTy) { 1604 default: 1605 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 1606 case MVT::i32: 1607 RC = PPC::GPRCRegisterClass; 1608 break; 1609 case MVT::f32: 1610 RC = PPC::F4RCRegisterClass; 1611 break; 1612 case MVT::f64: 1613 RC = PPC::F8RCRegisterClass; 1614 break; 1615 case MVT::v16i8: 1616 case MVT::v8i16: 1617 case MVT::v4i32: 1618 case MVT::v4f32: 1619 RC = PPC::VRRCRegisterClass; 1620 break; 1621 } 1622 1623 // Transform the arguments stored in physical registers into virtual ones. 1624 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1625 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT); 1626 1627 InVals.push_back(ArgValue); 1628 } else { 1629 // Argument stored in memory. 1630 assert(VA.isMemLoc()); 1631 1632 unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; 1633 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 1634 isImmutable, false); 1635 1636 // Create load nodes to retrieve arguments from the stack. 1637 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1638 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0, 1639 false, false, 0)); 1640 } 1641 } 1642 1643 // Assign locations to all of the incoming aggregate by value arguments. 1644 // Aggregates passed by value are stored in the local variable space of the 1645 // caller's stack frame, right above the parameter list area. 1646 SmallVector<CCValAssign, 16> ByValArgLocs; 1647 CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), 1648 ByValArgLocs, *DAG.getContext()); 1649 1650 // Reserve stack space for the allocations in CCInfo. 1651 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 1652 1653 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4_ByVal); 1654 1655 // Area that is at least reserved in the caller of this function. 1656 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 1657 1658 // Set the size that is at least reserved in caller of this function. Tail 1659 // call optimized function's reserved stack space needs to be aligned so that 1660 // taking the difference between two stack areas will result in an aligned 1661 // stack. 
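  // For example, with a 16-byte target stack alignment AlignMask is 15, so a
  // MinReservedArea of 40 bytes is rounded up to 48: (40 + 15) & ~15 == 48.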
1662 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1663 1664 MinReservedArea = 1665 std::max(MinReservedArea, 1666 PPCFrameInfo::getMinCallFrameSize(false, false)); 1667 1668 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()-> 1669 getStackAlignment(); 1670 unsigned AlignMask = TargetAlign-1; 1671 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 1672 1673 FI->setMinReservedArea(MinReservedArea); 1674 1675 SmallVector<SDValue, 8> MemOps; 1676 1677 // If the function takes variable number of arguments, make a frame index for 1678 // the start of the first vararg value... for expansion of llvm.va_start. 1679 if (isVarArg) { 1680 static const unsigned GPArgRegs[] = { 1681 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1682 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1683 }; 1684 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 1685 1686 static const unsigned FPArgRegs[] = { 1687 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1688 PPC::F8 1689 }; 1690 const unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 1691 1692 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs, 1693 NumGPArgRegs)); 1694 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs, 1695 NumFPArgRegs)); 1696 1697 // Make room for NumGPArgRegs and NumFPArgRegs. 1698 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 1699 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8; 1700 1701 FuncInfo->setVarArgsStackOffset( 1702 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 1703 CCInfo.getNextStackOffset(), 1704 true, false)); 1705 1706 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 1707 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 1708 1709 // The fixed integer arguments of a variadic function are 1710 // stored to the VarArgsFrameIndex on the stack. 1711 unsigned GPRIndex = 0; 1712 for (; GPRIndex != FuncInfo->getVarArgsNumGPR(); ++GPRIndex) { 1713 SDValue Val = DAG.getRegister(GPArgRegs[GPRIndex], PtrVT); 1714 SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0, 1715 false, false, 0); 1716 MemOps.push_back(Store); 1717 // Increment the address by four for the next argument to store 1718 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 1719 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1720 } 1721 1722 // If this function is vararg, store any remaining integer argument regs 1723 // to their spots on the stack so that they may be loaded by deferencing the 1724 // result of va_next. 1725 for (; GPRIndex != NumGPArgRegs; ++GPRIndex) { 1726 unsigned VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 1727 1728 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 1729 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0, 1730 false, false, 0); 1731 MemOps.push_back(Store); 1732 // Increment the address by four for the next argument to store 1733 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 1734 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1735 } 1736 1737 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 1738 // is set. 1739 1740 // The double arguments are stored to the VarArgsFrameIndex 1741 // on the stack. 
1742 unsigned FPRIndex = 0; 1743 for (FPRIndex = 0; FPRIndex != FuncInfo->getVarArgsNumFPR(); ++FPRIndex) { 1744 SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], MVT::f64); 1745 SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0, 1746 false, false, 0); 1747 MemOps.push_back(Store); 1748 // Increment the address by eight for the next argument to store 1749 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8, 1750 PtrVT); 1751 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1752 } 1753 1754 for (; FPRIndex != NumFPArgRegs; ++FPRIndex) { 1755 unsigned VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 1756 1757 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 1758 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0, 1759 false, false, 0); 1760 MemOps.push_back(Store); 1761 // Increment the address by eight for the next argument to store 1762 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8, 1763 PtrVT); 1764 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 1765 } 1766 } 1767 1768 if (!MemOps.empty()) 1769 Chain = DAG.getNode(ISD::TokenFactor, dl, 1770 MVT::Other, &MemOps[0], MemOps.size()); 1771 1772 return Chain; 1773} 1774 1775SDValue 1776PPCTargetLowering::LowerFormalArguments_Darwin( 1777 SDValue Chain, 1778 CallingConv::ID CallConv, bool isVarArg, 1779 const SmallVectorImpl<ISD::InputArg> 1780 &Ins, 1781 DebugLoc dl, SelectionDAG &DAG, 1782 SmallVectorImpl<SDValue> &InVals) const { 1783 // TODO: add description of PPC stack frame format, or at least some docs. 1784 // 1785 MachineFunction &MF = DAG.getMachineFunction(); 1786 MachineFrameInfo *MFI = MF.getFrameInfo(); 1787 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1788 1789 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1790 bool isPPC64 = PtrVT == MVT::i64; 1791 // Potential tail calls could cause overwriting of argument stack slots. 1792 bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast)); 1793 unsigned PtrByteSize = isPPC64 ? 8 : 4; 1794 1795 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, true); 1796 // Area that is at least reserved in caller of this function. 1797 unsigned MinReservedArea = ArgOffset; 1798 1799 static const unsigned GPR_32[] = { // 32-bit registers. 1800 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1801 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1802 }; 1803 static const unsigned GPR_64[] = { // 64-bit registers. 1804 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 1805 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 1806 }; 1807 1808 static const unsigned *FPR = GetFPR(); 1809 1810 static const unsigned VR[] = { 1811 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 1812 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 1813 }; 1814 1815 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 1816 const unsigned Num_FPR_Regs = 13; 1817 const unsigned Num_VR_Regs = array_lengthof( VR); 1818 1819 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 1820 1821 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 1822 1823 // In 32-bit non-varargs functions, the stack space for vectors is after the 1824 // stack space for non-vectors. We do not use this space unless we have 1825 // too many vectors to fit in registers, something that only occurs in 1826 // constructed examples:), but we have to walk the arglist to figure 1827 // that out...for the pathological case, compute VecArgOffset as the 1828 // start of the vector parameter area. 
Computing VecArgOffset is the 1829 // entire point of the following loop. 1830 unsigned VecArgOffset = ArgOffset; 1831 if (!isVarArg && !isPPC64) { 1832 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 1833 ++ArgNo) { 1834 EVT ObjectVT = Ins[ArgNo].VT; 1835 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 1836 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 1837 1838 if (Flags.isByVal()) { 1839 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 1840 ObjSize = Flags.getByValSize(); 1841 unsigned ArgSize = 1842 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1843 VecArgOffset += ArgSize; 1844 continue; 1845 } 1846 1847 switch(ObjectVT.getSimpleVT().SimpleTy) { 1848 default: llvm_unreachable("Unhandled argument type!"); 1849 case MVT::i32: 1850 case MVT::f32: 1851 VecArgOffset += isPPC64 ? 8 : 4; 1852 break; 1853 case MVT::i64: // PPC64 1854 case MVT::f64: 1855 VecArgOffset += 8; 1856 break; 1857 case MVT::v4f32: 1858 case MVT::v4i32: 1859 case MVT::v8i16: 1860 case MVT::v16i8: 1861 // Nothing to do, we're only looking at Nonvector args here. 1862 break; 1863 } 1864 } 1865 } 1866 // We've found where the vector parameter area in memory is. Skip the 1867 // first 12 parameters; these don't use that memory. 1868 VecArgOffset = ((VecArgOffset+15)/16)*16; 1869 VecArgOffset += 12*16; 1870 1871 // Add DAG nodes to load the arguments or copy them out of registers. On 1872 // entry to a function on PPC, the arguments start after the linkage area, 1873 // although the first ones are often in registers. 1874 1875 SmallVector<SDValue, 8> MemOps; 1876 unsigned nAltivecParamsAtEnd = 0; 1877 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 1878 SDValue ArgVal; 1879 bool needsLoad = false; 1880 EVT ObjectVT = Ins[ArgNo].VT; 1881 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 1882 unsigned ArgSize = ObjSize; 1883 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 1884 1885 unsigned CurArgOffset = ArgOffset; 1886 1887 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 1888 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 1889 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 1890 if (isVarArg || isPPC64) { 1891 MinReservedArea = ((MinReservedArea+15)/16)*16; 1892 MinReservedArea += CalculateStackSlotSize(ObjectVT, 1893 Flags, 1894 PtrByteSize); 1895 } else nAltivecParamsAtEnd++; 1896 } else 1897 // Calculate min reserved area. 1898 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 1899 Flags, 1900 PtrByteSize); 1901 1902 // FIXME the codegen can be much improved in some cases. 1903 // We do not have to keep everything in memory. 1904 if (Flags.isByVal()) { 1905 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 1906 ObjSize = Flags.getByValSize(); 1907 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1908 // Objects of size 1 and 2 are right justified, everything else is 1909 // left justified. This means the memory address is adjusted forwards. 1910 if (ObjSize==1 || ObjSize==2) { 1911 CurArgOffset = CurArgOffset + (4 - ObjSize); 1912 } 1913 // The value of the object is its address. 
1914 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true, false); 1915 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1916 InVals.push_back(FIN); 1917 if (ObjSize==1 || ObjSize==2) { 1918 if (GPR_idx != Num_GPR_Regs) { 1919 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 1920 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 1921 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 1922 NULL, 0, 1923 ObjSize==1 ? MVT::i8 : MVT::i16, 1924 false, false, 0); 1925 MemOps.push_back(Store); 1926 ++GPR_idx; 1927 } 1928 1929 ArgOffset += PtrByteSize; 1930 1931 continue; 1932 } 1933 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 1934 // Store whatever pieces of the object are in registers 1935 // to memory. ArgVal will be address of the beginning of 1936 // the object. 1937 if (GPR_idx != Num_GPR_Regs) { 1938 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 1939 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true, false); 1940 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1941 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 1942 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0, 1943 false, false, 0); 1944 MemOps.push_back(Store); 1945 ++GPR_idx; 1946 ArgOffset += PtrByteSize; 1947 } else { 1948 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 1949 break; 1950 } 1951 } 1952 continue; 1953 } 1954 1955 switch (ObjectVT.getSimpleVT().SimpleTy) { 1956 default: llvm_unreachable("Unhandled argument type!"); 1957 case MVT::i32: 1958 if (!isPPC64) { 1959 if (GPR_idx != Num_GPR_Regs) { 1960 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 1961 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 1962 ++GPR_idx; 1963 } else { 1964 needsLoad = true; 1965 ArgSize = PtrByteSize; 1966 } 1967 // All int arguments reserve stack space in the Darwin ABI. 1968 ArgOffset += PtrByteSize; 1969 break; 1970 } 1971 // FALLTHROUGH 1972 case MVT::i64: // PPC64 1973 if (GPR_idx != Num_GPR_Regs) { 1974 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 1975 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 1976 1977 if (ObjectVT == MVT::i32) { 1978 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 1979 // value to MVT::i64 and then truncate to the correct register size. 1980 if (Flags.isSExt()) 1981 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 1982 DAG.getValueType(ObjectVT)); 1983 else if (Flags.isZExt()) 1984 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 1985 DAG.getValueType(ObjectVT)); 1986 1987 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 1988 } 1989 1990 ++GPR_idx; 1991 } else { 1992 needsLoad = true; 1993 ArgSize = PtrByteSize; 1994 } 1995 // All int arguments reserve stack space in the Darwin ABI. 1996 ArgOffset += 8; 1997 break; 1998 1999 case MVT::f32: 2000 case MVT::f64: 2001 // Every 4 bytes of argument space consumes one of the GPRs available for 2002 // argument passing. 2003 if (GPR_idx != Num_GPR_Regs) { 2004 ++GPR_idx; 2005 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 2006 ++GPR_idx; 2007 } 2008 if (FPR_idx != Num_FPR_Regs) { 2009 unsigned VReg; 2010 2011 if (ObjectVT == MVT::f32) 2012 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2013 else 2014 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2015 2016 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2017 ++FPR_idx; 2018 } else { 2019 needsLoad = true; 2020 } 2021 2022 // All FP arguments reserve stack space in the Darwin ABI. 
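      // For example, on 32-bit Darwin an f32 argument consumes 4 bytes of
      // argument space and shadows one GPR, while an f64 consumes 8 bytes and
      // shadows two GPRs; that is what the GPR_idx bookkeeping above models.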
2023 ArgOffset += isPPC64 ? 8 : ObjSize; 2024 break; 2025 case MVT::v4f32: 2026 case MVT::v4i32: 2027 case MVT::v8i16: 2028 case MVT::v16i8: 2029 // Note that vector arguments in registers don't reserve stack space, 2030 // except in varargs functions. 2031 if (VR_idx != Num_VR_Regs) { 2032 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2033 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2034 if (isVarArg) { 2035 while ((ArgOffset % 16) != 0) { 2036 ArgOffset += PtrByteSize; 2037 if (GPR_idx != Num_GPR_Regs) 2038 GPR_idx++; 2039 } 2040 ArgOffset += 16; 2041 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2042 } 2043 ++VR_idx; 2044 } else { 2045 if (!isVarArg && !isPPC64) { 2046 // Vectors go after all the nonvectors. 2047 CurArgOffset = VecArgOffset; 2048 VecArgOffset += 16; 2049 } else { 2050 // Vectors are aligned. 2051 ArgOffset = ((ArgOffset+15)/16)*16; 2052 CurArgOffset = ArgOffset; 2053 ArgOffset += 16; 2054 } 2055 needsLoad = true; 2056 } 2057 break; 2058 } 2059 2060 // We need to load the argument to a virtual register if we determined above 2061 // that we ran out of physical registers of the appropriate type. 2062 if (needsLoad) { 2063 int FI = MFI->CreateFixedObject(ObjSize, 2064 CurArgOffset + (ArgSize - ObjSize), 2065 isImmutable, false); 2066 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2067 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0, 2068 false, false, 0); 2069 } 2070 2071 InVals.push_back(ArgVal); 2072 } 2073 2074 // Set the size that is at least reserved in caller of this function. Tail 2075 // call optimized function's reserved stack space needs to be aligned so that 2076 // taking the difference between two stack areas will result in an aligned 2077 // stack. 2078 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2079 // Add the Altivec parameters at the end, if needed. 2080 if (nAltivecParamsAtEnd) { 2081 MinReservedArea = ((MinReservedArea+15)/16)*16; 2082 MinReservedArea += 16*nAltivecParamsAtEnd; 2083 } 2084 MinReservedArea = 2085 std::max(MinReservedArea, 2086 PPCFrameInfo::getMinCallFrameSize(isPPC64, true)); 2087 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()-> 2088 getStackAlignment(); 2089 unsigned AlignMask = TargetAlign-1; 2090 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 2091 FI->setMinReservedArea(MinReservedArea); 2092 2093 // If the function takes variable number of arguments, make a frame index for 2094 // the start of the first vararg value... for expansion of llvm.va_start. 2095 if (isVarArg) { 2096 int Depth = ArgOffset; 2097 2098 FuncInfo->setVarArgsFrameIndex( 2099 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2100 Depth, true, false)); 2101 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2102 2103 // If this function is vararg, store any remaining integer argument regs 2104 // to their spots on the stack so that they may be loaded by deferencing the 2105 // result of va_next. 
2106 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2107 unsigned VReg; 2108 2109 if (isPPC64) 2110 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2111 else 2112 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2113 2114 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2115 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0, 2116 false, false, 0); 2117 MemOps.push_back(Store); 2118 // Increment the address by four for the next argument to store 2119 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2120 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2121 } 2122 } 2123 2124 if (!MemOps.empty()) 2125 Chain = DAG.getNode(ISD::TokenFactor, dl, 2126 MVT::Other, &MemOps[0], MemOps.size()); 2127 2128 return Chain; 2129} 2130 2131/// CalculateParameterAndLinkageAreaSize - Get the size of the paramter plus 2132/// linkage area for the Darwin ABI. 2133static unsigned 2134CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, 2135 bool isPPC64, 2136 bool isVarArg, 2137 unsigned CC, 2138 const SmallVectorImpl<ISD::OutputArg> 2139 &Outs, 2140 unsigned &nAltivecParamsAtEnd) { 2141 // Count how many bytes are to be pushed on the stack, including the linkage 2142 // area, and parameter passing area. We start with 24/48 bytes, which is 2143 // prereserved space for [SP][CR][LR][3 x unused]. 2144 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, true); 2145 unsigned NumOps = Outs.size(); 2146 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2147 2148 // Add up all the space actually used. 2149 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 2150 // they all go in registers, but we must reserve stack space for them for 2151 // possible use by the caller. In varargs or 64-bit calls, parameters are 2152 // assigned stack space in order, with padding so Altivec parameters are 2153 // 16-byte aligned. 2154 nAltivecParamsAtEnd = 0; 2155 for (unsigned i = 0; i != NumOps; ++i) { 2156 SDValue Arg = Outs[i].Val; 2157 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2158 EVT ArgVT = Arg.getValueType(); 2159 // Varargs Altivec parameters are padded to a 16 byte boundary. 2160 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 2161 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 2162 if (!isVarArg && !isPPC64) { 2163 // Non-varargs Altivec parameters go after all the non-Altivec 2164 // parameters; handle those later so we know how much padding we need. 2165 nAltivecParamsAtEnd++; 2166 continue; 2167 } 2168 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 2169 NumBytes = ((NumBytes+15)/16)*16; 2170 } 2171 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2172 } 2173 2174 // Allow for Altivec parameters at the end, if needed. 2175 if (nAltivecParamsAtEnd) { 2176 NumBytes = ((NumBytes+15)/16)*16; 2177 NumBytes += 16*nAltivecParamsAtEnd; 2178 } 2179 2180 // The prolog code of the callee may store up to 8 GPR argument registers to 2181 // the stack, allowing va_start to index over them in memory if its varargs. 2182 // Because we cannot tell if this is needed on the caller side, we have to 2183 // conservatively assume that it is needed. As such, make sure we have at 2184 // least enough stack space for the caller to store the 8 GPRs. 2185 NumBytes = std::max(NumBytes, 2186 PPCFrameInfo::getMinCallFrameSize(isPPC64, true)); 2187 2188 // Tail call needs the stack to be aligned. 
2189 if (CC==CallingConv::Fast && GuaranteedTailCallOpt) {
2190 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
2191 getStackAlignment();
2192 unsigned AlignMask = TargetAlign-1;
2193 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
2194 }
2195
2196 return NumBytes;
2197}
2198
2199/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
2200/// adjusted to accommodate the arguments for the tail call.
2201static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
2202 unsigned ParamSize) {
2203
2204 if (!isTailCall) return 0;
2205
2206 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
2207 unsigned CallerMinReservedArea = FI->getMinReservedArea();
2208 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
2209 // Remember only if the new adjustment is bigger.
2210 if (SPDiff < FI->getTailCallSPDelta())
2211 FI->setTailCallSPDelta(SPDiff);
2212
2213 return SPDiff;
2214}
2215
2216/// IsEligibleForTailCallOptimization - Check whether the call is eligible
2217/// for tail call optimization. Targets which want to do tail call
2218/// optimization should implement this function.
2219bool
2220PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
2221 CallingConv::ID CalleeCC,
2222 bool isVarArg,
2223 const SmallVectorImpl<ISD::InputArg> &Ins,
2224 SelectionDAG& DAG) const {
2225 if (!GuaranteedTailCallOpt)
2226 return false;
2227
2228 // Variable argument functions are not supported.
2229 if (isVarArg)
2230 return false;
2231
2232 MachineFunction &MF = DAG.getMachineFunction();
2233 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
2234 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
2235 // Functions containing byval parameters are not supported.
2236 for (unsigned i = 0; i != Ins.size(); i++) {
2237 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2238 if (Flags.isByVal()) return false;
2239 }
2240
2241 // Non-PIC/GOT tail calls are supported.
2242 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
2243 return true;
2244
2245 // At the moment we can only do local tail calls (in the same module, hidden
2246 // or protected) if we are generating PIC.
2247 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2248 return G->getGlobal()->hasHiddenVisibility()
2249 || G->getGlobal()->hasProtectedVisibility();
2250 }
2251
2252 return false;
2253}
2254
2255/// isBLACompatibleAddress - Return the immediate to use if the specified
2256/// 32-bit value is representable in the immediate field of a BxA instruction.
2257static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
2258 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2259 if (!C) return 0;
2260
2261 int Addr = C->getZExtValue();
2262 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
2263 (Addr << 6 >> 6) != Addr)
2264 return 0; // Top 6 bits have to be sext of immediate.
2265
2266 return DAG.getConstant((int)C->getZExtValue() >> 2,
2267 DAG.getTargetLoweringInfo().getPointerTy()).getNode();
2268}
2269
2270namespace {
2271
2272struct TailCallArgumentInfo {
2273 SDValue Arg;
2274 SDValue FrameIdxOp;
2275 int FrameIdx;
2276
2277 TailCallArgumentInfo() : FrameIdx(0) {}
2278};
2279
2280}
2281
2282/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
2283static void
2284StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
2285 SDValue Chain,
2286 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
2287 SmallVector<SDValue, 8> &MemOpChains,
2288 DebugLoc dl) {
2289 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
2290 SDValue Arg = TailCallArgs[i].Arg;
2291 SDValue FIN = TailCallArgs[i].FrameIdxOp;
2292 int FI = TailCallArgs[i].FrameIdx;
2293 // Store relative to the frame pointer.
2294 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
2295 PseudoSourceValue::getFixedStack(FI),
2296 0, false, false, 0));
2297 }
2298}
2299
2300/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
2301/// the appropriate stack slot for the tail call optimized function call.
2302static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
2303 MachineFunction &MF,
2304 SDValue Chain,
2305 SDValue OldRetAddr,
2306 SDValue OldFP,
2307 int SPDiff,
2308 bool isPPC64,
2309 bool isDarwinABI,
2310 DebugLoc dl) {
2311 if (SPDiff) {
2312 // Calculate the new stack slot for the return address.
2313 int SlotSize = isPPC64 ? 8 : 4;
2314 int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64,
2315 isDarwinABI);
2316 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
2317 NewRetAddrLoc,
2318 true, false);
2319 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
2320 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
2321 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
2322 PseudoSourceValue::getFixedStack(NewRetAddr), 0,
2323 false, false, 0);
2324
2325 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
2326 // slot as the FP is never overwritten.
2327 if (isDarwinABI) {
2328 int NewFPLoc =
2329 SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64, isDarwinABI);
2330 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
2331 true, false);
2332 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
2333 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
2334 PseudoSourceValue::getFixedStack(NewFPIdx), 0,
2335 false, false, 0);
2336 }
2337 }
2338 return Chain;
2339}
2340
2341/// CalculateTailCallArgDest - Remember the argument for later processing.
2342/// Calculate the position of the argument.
2343static void
2344CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
2345 SDValue Arg, int SPDiff, unsigned ArgOffset,
2346 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
2347 int Offset = ArgOffset + SPDiff;
2348 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
2349 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true,false);
2350 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
2351 SDValue FIN = DAG.getFrameIndex(FI, VT);
2352 TailCallArgumentInfo Info;
2353 Info.Arg = Arg;
2354 Info.FrameIdxOp = FIN;
2355 Info.FrameIdx = FI;
2356 TailCallArguments.push_back(Info);
2357}
2358
2359/// EmitTailCallLoadFPAndRetAddr - Emit loads from the return address and frame
2360/// pointer stack slots. Returns the chain as result and the loaded values in
2361/// LROpOut/FPOpOut. Used when tail calling.
2362SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
2363 int SPDiff,
2364 SDValue Chain,
2365 SDValue &LROpOut,
2366 SDValue &FPOpOut,
2367 bool isDarwinABI,
2368 DebugLoc dl) const {
2369 if (SPDiff) {
2370 // Load the LR and FP stack slots for later adjusting.
2371 EVT VT = PPCSubTarget.isPPC64() ?
MVT::i64 : MVT::i32; 2372 LROpOut = getReturnAddrFrameIndex(DAG); 2373 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, NULL, 0, 2374 false, false, 0); 2375 Chain = SDValue(LROpOut.getNode(), 1); 2376 2377 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 2378 // slot as the FP is never overwritten. 2379 if (isDarwinABI) { 2380 FPOpOut = getFramePointerFrameIndex(DAG); 2381 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, NULL, 0, 2382 false, false, 0); 2383 Chain = SDValue(FPOpOut.getNode(), 1); 2384 } 2385 } 2386 return Chain; 2387} 2388 2389/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 2390/// by "Src" to address "Dst" of size "Size". Alignment information is 2391/// specified by the specific parameter attribute. The copy will be passed as 2392/// a byval function parameter. 2393/// Sometimes what we are copying is the end of a larger object, the part that 2394/// does not fit in registers. 2395static SDValue 2396CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 2397 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 2398 DebugLoc dl) { 2399 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 2400 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 2401 false, false, NULL, 0, NULL, 0); 2402} 2403 2404/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 2405/// tail calls. 2406static void 2407LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 2408 SDValue Arg, SDValue PtrOff, int SPDiff, 2409 unsigned ArgOffset, bool isPPC64, bool isTailCall, 2410 bool isVector, SmallVector<SDValue, 8> &MemOpChains, 2411 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments, 2412 DebugLoc dl) { 2413 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2414 if (!isTailCall) { 2415 if (isVector) { 2416 SDValue StackPtr; 2417 if (isPPC64) 2418 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 2419 else 2420 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2421 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 2422 DAG.getConstant(ArgOffset, PtrVT)); 2423 } 2424 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0, 2425 false, false, 0)); 2426 // Calculate and remember argument location. 2427 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 2428 TailCallArguments); 2429} 2430 2431static 2432void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 2433 DebugLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 2434 SDValue LROp, SDValue FPOp, bool isDarwinABI, 2435 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) { 2436 MachineFunction &MF = DAG.getMachineFunction(); 2437 2438 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 2439 // might overwrite each other in case of tail call optimization. 2440 SmallVector<SDValue, 8> MemOpChains2; 2441 // Do not flag preceeding copytoreg stuff together with the following stuff. 2442 InFlag = SDValue(); 2443 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 2444 MemOpChains2, dl); 2445 if (!MemOpChains2.empty()) 2446 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2447 &MemOpChains2[0], MemOpChains2.size()); 2448 2449 // Store the return address to the appropriate stack slot. 2450 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 2451 isPPC64, isDarwinABI, dl); 2452 2453 // Emit callseq_end just before tailcall node. 
2454 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2455 DAG.getIntPtrConstant(0, true), InFlag); 2456 InFlag = Chain.getValue(1); 2457} 2458 2459static 2460unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 2461 SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall, 2462 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, 2463 SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, 2464 bool isPPC64, bool isSVR4ABI) { 2465 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2466 NodeTys.push_back(MVT::Other); // Returns a chain 2467 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 2468 2469 unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin; 2470 2471 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 2472 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 2473 // node so that legalize doesn't hack it. 2474 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2475 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType()); 2476 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) 2477 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); 2478 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) 2479 // If this is an absolute destination address, use the munged value. 2480 Callee = SDValue(Dest, 0); 2481 else { 2482 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 2483 // to do the call, we can't use PPCISD::CALL. 2484 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 2485 2486 if (isSVR4ABI && isPPC64) { 2487 // Function pointers in the 64-bit SVR4 ABI do not point to the function 2488 // entry point, but to the function descriptor (the function entry point 2489 // address is part of the function descriptor though). 2490 // The function descriptor is a three doubleword structure with the 2491 // following fields: function entry point, TOC base address and 2492 // environment pointer. 2493 // Thus for a call through a function pointer, the following actions need 2494 // to be performed: 2495 // 1. Save the TOC of the caller in the TOC save area of its stack 2496 // frame (this is done in LowerCall_Darwin()). 2497 // 2. Load the address of the function entry point from the function 2498 // descriptor. 2499 // 3. Load the TOC of the callee from the function descriptor into r2. 2500 // 4. Load the environment pointer from the function descriptor into 2501 // r11. 2502 // 5. Branch to the function entry point address. 2503 // 6. On return of the callee, the TOC of the caller needs to be 2504 // restored (this is done in FinishCall()). 2505 // 2506 // All those operations are flagged together to ensure that no other 2507 // operations can be scheduled in between. E.g. without flagging the 2508 // operations together, a TOC access in the caller could be scheduled 2509 // between the load of the callee TOC and the branch to the callee, which 2510 // results in the TOC access going through the TOC of the callee instead 2511 // of going through the TOC of the caller, which leads to incorrect code. 2512 2513 // Load the address of the function entry point from the function 2514 // descriptor. 2515 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Flag); 2516 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps, 2517 InFlag.getNode() ? 
3 : 2); 2518 Chain = LoadFuncPtr.getValue(1); 2519 InFlag = LoadFuncPtr.getValue(2); 2520 2521 // Load environment pointer into r11. 2522 // Offset of the environment pointer within the function descriptor. 2523 SDValue PtrOff = DAG.getIntPtrConstant(16); 2524 2525 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 2526 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, 2527 InFlag); 2528 Chain = LoadEnvPtr.getValue(1); 2529 InFlag = LoadEnvPtr.getValue(2); 2530 2531 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 2532 InFlag); 2533 Chain = EnvVal.getValue(0); 2534 InFlag = EnvVal.getValue(1); 2535 2536 // Load TOC of the callee into r2. We are using a target-specific load 2537 // with r2 hard coded, because the result of a target-independent load 2538 // would never go directly into r2, since r2 is a reserved register (which 2539 // prevents the register allocator from allocating it), resulting in an 2540 // additional register being allocated and an unnecessary move instruction 2541 // being generated. 2542 VTs = DAG.getVTList(MVT::Other, MVT::Flag); 2543 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 2544 Callee, InFlag); 2545 Chain = LoadTOCPtr.getValue(0); 2546 InFlag = LoadTOCPtr.getValue(1); 2547 2548 MTCTROps[0] = Chain; 2549 MTCTROps[1] = LoadFuncPtr; 2550 MTCTROps[2] = InFlag; 2551 } 2552 2553 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 2554 2 + (InFlag.getNode() != 0)); 2555 InFlag = Chain.getValue(1); 2556 2557 NodeTys.clear(); 2558 NodeTys.push_back(MVT::Other); 2559 NodeTys.push_back(MVT::Flag); 2560 Ops.push_back(Chain); 2561 CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin; 2562 Callee.setNode(0); 2563 // Add CTR register as callee so a bctr can be emitted later. 2564 if (isTailCall) 2565 Ops.push_back(DAG.getRegister(PPC::CTR, PtrVT)); 2566 } 2567 2568 // If this is a direct call, pass the chain and the callee. 2569 if (Callee.getNode()) { 2570 Ops.push_back(Chain); 2571 Ops.push_back(Callee); 2572 } 2573 // If this is a tail call add stack pointer delta. 2574 if (isTailCall) 2575 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 2576 2577 // Add argument registers to the end of the list so that they are known live 2578 // into the call. 2579 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2580 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2581 RegsToPass[i].second.getValueType())); 2582 2583 return CallOpc; 2584} 2585 2586SDValue 2587PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 2588 CallingConv::ID CallConv, bool isVarArg, 2589 const SmallVectorImpl<ISD::InputArg> &Ins, 2590 DebugLoc dl, SelectionDAG &DAG, 2591 SmallVectorImpl<SDValue> &InVals) const { 2592 2593 SmallVector<CCValAssign, 16> RVLocs; 2594 CCState CCRetInfo(CallConv, isVarArg, getTargetMachine(), 2595 RVLocs, *DAG.getContext()); 2596 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 2597 2598 // Copy all of the result registers out of their specified physreg. 
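  // Each CopyFromReg node below produces (value, chain, glue); taking
  // getValue(1) as the new chain lets getValue(0) and getValue(2) pull the
  // copied value and the glue back off the same node.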
2599 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2600 CCValAssign &VA = RVLocs[i]; 2601 EVT VT = VA.getValVT(); 2602 assert(VA.isRegLoc() && "Can only return in registers!"); 2603 Chain = DAG.getCopyFromReg(Chain, dl, 2604 VA.getLocReg(), VT, InFlag).getValue(1); 2605 InVals.push_back(Chain.getValue(0)); 2606 InFlag = Chain.getValue(2); 2607 } 2608 2609 return Chain; 2610} 2611 2612SDValue 2613PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, 2614 bool isTailCall, bool isVarArg, 2615 SelectionDAG &DAG, 2616 SmallVector<std::pair<unsigned, SDValue>, 8> 2617 &RegsToPass, 2618 SDValue InFlag, SDValue Chain, 2619 SDValue &Callee, 2620 int SPDiff, unsigned NumBytes, 2621 const SmallVectorImpl<ISD::InputArg> &Ins, 2622 SmallVectorImpl<SDValue> &InVals) const { 2623 std::vector<EVT> NodeTys; 2624 SmallVector<SDValue, 8> Ops; 2625 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 2626 isTailCall, RegsToPass, Ops, NodeTys, 2627 PPCSubTarget.isPPC64(), 2628 PPCSubTarget.isSVR4ABI()); 2629 2630 // When performing tail call optimization the callee pops its arguments off 2631 // the stack. Account for this here so these bytes can be pushed back on in 2632 // PPCRegisterInfo::eliminateCallFramePseudoInstr. 2633 int BytesCalleePops = 2634 (CallConv==CallingConv::Fast && GuaranteedTailCallOpt) ? NumBytes : 0; 2635 2636 if (InFlag.getNode()) 2637 Ops.push_back(InFlag); 2638 2639 // Emit tail call. 2640 if (isTailCall) { 2641 // If this is the first return lowered for this function, add the regs 2642 // to the liveout set for the function. 2643 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 2644 SmallVector<CCValAssign, 16> RVLocs; 2645 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, 2646 *DAG.getContext()); 2647 CCInfo.AnalyzeCallResult(Ins, RetCC_PPC); 2648 for (unsigned i = 0; i != RVLocs.size(); ++i) 2649 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 2650 } 2651 2652 assert(((Callee.getOpcode() == ISD::Register && 2653 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 2654 Callee.getOpcode() == ISD::TargetExternalSymbol || 2655 Callee.getOpcode() == ISD::TargetGlobalAddress || 2656 isa<ConstantSDNode>(Callee)) && 2657 "Expecting an global address, external symbol, absolute value or register"); 2658 2659 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size()); 2660 } 2661 2662 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 2663 InFlag = Chain.getValue(1); 2664 2665 // Add a NOP immediately after the branch instruction when using the 64-bit 2666 // SVR4 ABI. At link time, if caller and callee are in a different module and 2667 // thus have a different TOC, the call will be replaced with a call to a stub 2668 // function which saves the current TOC, loads the TOC of the callee and 2669 // branches to the callee. The NOP will be replaced with a load instruction 2670 // which restores the TOC of the caller from the TOC save slot of the current 2671 // stack frame. If caller and callee belong to the same module (and have the 2672 // same TOC), the NOP will remain unchanged. 2673 if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) { 2674 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Flag); 2675 if (CallOpc == PPCISD::BCTRL_SVR4) { 2676 // This is a call through a function pointer. 2677 // Restore the caller TOC from the save area into R2. 
2678 // See PrepareCall() for more information about calls through function
2679 // pointers in the 64-bit SVR4 ABI.
2680 // We are using a target-specific load with r2 hard coded, because the
2681 // result of a target-independent load would never go directly into r2,
2682 // since r2 is a reserved register (which prevents the register allocator
2683 // from allocating it), resulting in an additional register being
2684 // allocated and an unnecessary move instruction being generated.
2685 Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag);
2686 InFlag = Chain.getValue(1);
2687 } else {
2688 // Otherwise insert a NOP.
2689 InFlag = DAG.getNode(PPCISD::NOP, dl, MVT::Flag, InFlag);
2690 }
2691 }
2692
2693 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2694 DAG.getIntPtrConstant(BytesCalleePops, true),
2695 InFlag);
2696 if (!Ins.empty())
2697 InFlag = Chain.getValue(1);
2698
2699 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
2700 Ins, dl, DAG, InVals);
2701}
2702
2703SDValue
2704PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
2705 CallingConv::ID CallConv, bool isVarArg,
2706 bool &isTailCall,
2707 const SmallVectorImpl<ISD::OutputArg> &Outs,
2708 const SmallVectorImpl<ISD::InputArg> &Ins,
2709 DebugLoc dl, SelectionDAG &DAG,
2710 SmallVectorImpl<SDValue> &InVals) const {
2711 if (isTailCall)
2712 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
2713 Ins, DAG);
2714
2715 if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
2716 return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
2717 isTailCall, Outs, Ins,
2718 dl, DAG, InVals);
2719 } else {
2720 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
2721 isTailCall, Outs, Ins,
2722 dl, DAG, InVals);
2723 }
2724}
2725
2726SDValue
2727PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
2728 CallingConv::ID CallConv, bool isVarArg,
2729 bool isTailCall,
2730 const SmallVectorImpl<ISD::OutputArg> &Outs,
2731 const SmallVectorImpl<ISD::InputArg> &Ins,
2732 DebugLoc dl, SelectionDAG &DAG,
2733 SmallVectorImpl<SDValue> &InVals) const {
2734 // See PPCTargetLowering::LowerFormalArguments_SVR4() for a description
2735 // of the 32-bit SVR4 ABI stack frame layout.
2736
2737 assert((CallConv == CallingConv::C ||
2738 CallConv == CallingConv::Fast) && "Unknown calling convention!");
2739
2740 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2741 unsigned PtrByteSize = 4;
2742
2743 MachineFunction &MF = DAG.getMachineFunction();
2744
2745 // Mark this function as potentially containing a call that may be tail-call
2746 // optimized. As a consequence the frame pointer will be used for dynamic
2747 // stack allocation and for restoring the caller's stack pointer in this
2748 // function's epilogue. This is done because the tail-called function might
2749 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
2750 if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
2751 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2752
2753 // Count how many bytes are to be pushed on the stack, including the linkage
2754 // area, parameter list area and the part of the local variable space which
2755 // contains copies of aggregates which are passed by value.
2756
2757 // Assign locations to all of the outgoing arguments.
2758 SmallVector<CCValAssign, 16> ArgLocs;
2759 CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
2760 ArgLocs, *DAG.getContext());
2761
2762 // Reserve space for the linkage area on the stack.
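  // (For the 32-bit SVR4 ABI this should be the 8-byte frame header: the back
  // chain word plus the LR save word.)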
2763 CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize); 2764 2765 if (isVarArg) { 2766 // Handle fixed and variable vector arguments differently. 2767 // Fixed vector arguments go into registers as long as registers are 2768 // available. Variable vector arguments always go into memory. 2769 unsigned NumArgs = Outs.size(); 2770 2771 for (unsigned i = 0; i != NumArgs; ++i) { 2772 EVT ArgVT = Outs[i].Val.getValueType(); 2773 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 2774 bool Result; 2775 2776 if (Outs[i].IsFixed) { 2777 Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 2778 CCInfo); 2779 } else { 2780 Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 2781 ArgFlags, CCInfo); 2782 } 2783 2784 if (Result) { 2785#ifndef NDEBUG 2786 errs() << "Call operand #" << i << " has unhandled type " 2787 << ArgVT.getEVTString() << "\n"; 2788#endif 2789 llvm_unreachable(0); 2790 } 2791 } 2792 } else { 2793 // All arguments are treated the same. 2794 CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4); 2795 } 2796 2797 // Assign locations to all of the outgoing aggregate by value arguments. 2798 SmallVector<CCValAssign, 16> ByValArgLocs; 2799 CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), ByValArgLocs, 2800 *DAG.getContext()); 2801 2802 // Reserve stack space for the allocations in CCInfo. 2803 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2804 2805 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4_ByVal); 2806 2807 // Size of the linkage area, parameter list area and the part of the local 2808 // space variable where copies of aggregates which are passed by value are 2809 // stored. 2810 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 2811 2812 // Calculate by how many bytes the stack has to be adjusted in case of tail 2813 // call optimization. 2814 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 2815 2816 // Adjust the stack pointer for the new arguments... 2817 // These operations are automatically eliminated by the prolog/epilog pass 2818 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 2819 SDValue CallSeqStart = Chain; 2820 2821 // Load the return address and frame pointer so it can be moved somewhere else 2822 // later. 2823 SDValue LROp, FPOp; 2824 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 2825 dl); 2826 2827 // Set up a copy of the stack pointer for use loading and storing any 2828 // arguments that may not fit in the registers available for argument 2829 // passing. 2830 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 2831 2832 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2833 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 2834 SmallVector<SDValue, 8> MemOpChains; 2835 2836 // Walk the register/memloc assignments, inserting copies/loads. 2837 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 2838 i != e; 2839 ++i) { 2840 CCValAssign &VA = ArgLocs[i]; 2841 SDValue Arg = Outs[i].Val; 2842 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2843 2844 if (Flags.isByVal()) { 2845 // Argument is an aggregate which is passed by value, thus we need to 2846 // create a copy of it in the local variable space of the current stack 2847 // frame (which is the stack frame of the caller) and pass the address of 2848 // this copy to the callee. 
2849 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 2850 CCValAssign &ByValVA = ByValArgLocs[j++]; 2851 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 2852 2853 // Memory reserved in the local variable space of the callers stack frame. 2854 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 2855 2856 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2857 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2858 2859 // Create a copy of the argument in the local area of the current 2860 // stack frame. 2861 SDValue MemcpyCall = 2862 CreateCopyOfByValArgument(Arg, PtrOff, 2863 CallSeqStart.getNode()->getOperand(0), 2864 Flags, DAG, dl); 2865 2866 // This must go outside the CALLSEQ_START..END. 2867 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 2868 CallSeqStart.getNode()->getOperand(1)); 2869 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 2870 NewCallSeqStart.getNode()); 2871 Chain = CallSeqStart = NewCallSeqStart; 2872 2873 // Pass the address of the aggregate copy on the stack either in a 2874 // physical register or in the parameter list area of the current stack 2875 // frame to the callee. 2876 Arg = PtrOff; 2877 } 2878 2879 if (VA.isRegLoc()) { 2880 // Put argument in a physical register. 2881 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2882 } else { 2883 // Put argument in the parameter list area of the current stack frame. 2884 assert(VA.isMemLoc()); 2885 unsigned LocMemOffset = VA.getLocMemOffset(); 2886 2887 if (!isTailCall) { 2888 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2889 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2890 2891 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 2892 PseudoSourceValue::getStack(), LocMemOffset, 2893 false, false, 0)); 2894 } else { 2895 // Calculate and remember argument location. 2896 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 2897 TailCallArguments); 2898 } 2899 } 2900 } 2901 2902 if (!MemOpChains.empty()) 2903 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2904 &MemOpChains[0], MemOpChains.size()); 2905 2906 // Build a sequence of copy-to-reg nodes chained together with token chain 2907 // and flag operands which copy the outgoing args into the appropriate regs. 2908 SDValue InFlag; 2909 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2910 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2911 RegsToPass[i].second, InFlag); 2912 InFlag = Chain.getValue(1); 2913 } 2914 2915 // Set CR6 to true if this is a vararg call. 
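  // The SVR4 ABI uses CR bit 6 (the EQ bit of CR1) to tell a varargs callee
  // whether any arguments were passed in floating-point registers. Setting it
  // unconditionally here is conservative but safe: at worst the callee saves
  // FPRs it did not need to.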
2916 if (isVarArg) {
2917 SDValue SetCR(DAG.getMachineNode(PPC::CRSET, dl, MVT::i32), 0);
2918 Chain = DAG.getCopyToReg(Chain, dl, PPC::CR1EQ, SetCR, InFlag);
2919 InFlag = Chain.getValue(1);
2920 }
2921
2922 if (isTailCall) {
2923 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
2924 false, TailCallArguments);
2925 }
2926
2927 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
2928 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
2929 Ins, InVals);
2930}
2931
2932SDValue
2933PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
2934 CallingConv::ID CallConv, bool isVarArg,
2935 bool isTailCall,
2936 const SmallVectorImpl<ISD::OutputArg> &Outs,
2937 const SmallVectorImpl<ISD::InputArg> &Ins,
2938 DebugLoc dl, SelectionDAG &DAG,
2939 SmallVectorImpl<SDValue> &InVals) const {
2940
2941 unsigned NumOps = Outs.size();
2942
2943 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2944 bool isPPC64 = PtrVT == MVT::i64;
2945 unsigned PtrByteSize = isPPC64 ? 8 : 4;
2946
2947 MachineFunction &MF = DAG.getMachineFunction();
2948
2949 // Mark this function as potentially containing a call that may be tail-call
2950 // optimized. As a consequence the frame pointer will be used for dynamic
2951 // stack allocation and for restoring the caller's stack pointer in this
2952 // function's epilogue. This is done because the tail-called function might
2953 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
2954 if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
2955 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2956
2957 unsigned nAltivecParamsAtEnd = 0;
2958
2959 // Count how many bytes are to be pushed on the stack, including the linkage
2960 // area, and parameter passing area. We start with 24/48 bytes, which is
2961 // prereserved space for [SP][CR][LR][3 x unused].
2962 unsigned NumBytes =
2963 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
2964 Outs,
2965 nAltivecParamsAtEnd);
2966
2967 // Calculate by how many bytes the stack has to be adjusted in case of tail
2968 // call optimization.
2969 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
2970
2971 // To protect arguments on the stack from being clobbered in a tail call,
2972 // force all the loads to happen before doing any other lowering.
2973 if (isTailCall)
2974 Chain = DAG.getStackArgumentTokenFactor(Chain);
2975
2976 // Adjust the stack pointer for the new arguments...
2977 // These operations are automatically eliminated by the prolog/epilog pass
2978 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2979 SDValue CallSeqStart = Chain;
2980
2981 // Load the return address and frame pointer so they can be moved somewhere
2982 // else later.
2983 SDValue LROp, FPOp;
2984 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
2985 dl);
2986
2987 // Set up a copy of the stack pointer for use loading and storing any
2988 // arguments that may not fit in the registers available for argument
2989 // passing.
2990 SDValue StackPtr;
2991 if (isPPC64)
2992 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2993 else
2994 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2995
2996 // Figure out which arguments are going to go in registers, and which in
2997 // memory. Also, if this is a vararg function, floating point arguments
2998 // must be stored to our stack, and loaded into integer regs as well, if
2999 // any integer regs are available for argument passing.
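  // For example, a vararg double on 32-bit Darwin goes out in an FPR, is also
  // stored to its stack slot, and its two 32-bit halves are then reloaded into
  // the next available GPRs so the callee's va_arg can find the value either
  // way.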
3000 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, true); 3001 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3002 3003 static const unsigned GPR_32[] = { // 32-bit registers. 3004 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3005 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3006 }; 3007 static const unsigned GPR_64[] = { // 64-bit registers. 3008 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3009 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3010 }; 3011 static const unsigned *FPR = GetFPR(); 3012 3013 static const unsigned VR[] = { 3014 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3015 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3016 }; 3017 const unsigned NumGPRs = array_lengthof(GPR_32); 3018 const unsigned NumFPRs = 13; 3019 const unsigned NumVRs = array_lengthof(VR); 3020 3021 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; 3022 3023 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3024 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3025 3026 SmallVector<SDValue, 8> MemOpChains; 3027 for (unsigned i = 0; i != NumOps; ++i) { 3028 SDValue Arg = Outs[i].Val; 3029 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3030 3031 // PtrOff will be used to store the current argument to the stack if a 3032 // register cannot be found for it. 3033 SDValue PtrOff; 3034 3035 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 3036 3037 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 3038 3039 // On PPC64, promote integers to 64-bit values. 3040 if (isPPC64 && Arg.getValueType() == MVT::i32) { 3041 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 3042 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 3043 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 3044 } 3045 3046 // FIXME memcpy is used way more than necessary. Correctness first. 3047 if (Flags.isByVal()) { 3048 unsigned Size = Flags.getByValSize(); 3049 if (Size==1 || Size==2) { 3050 // Very small objects are passed right-justified. 3051 // Everything else is passed left-justified. 3052 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 3053 if (GPR_idx != NumGPRs) { 3054 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 3055 NULL, 0, VT, false, false, 0); 3056 MemOpChains.push_back(Load.getValue(1)); 3057 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3058 3059 ArgOffset += PtrByteSize; 3060 } else { 3061 SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); 3062 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3063 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, 3064 CallSeqStart.getNode()->getOperand(0), 3065 Flags, DAG, dl); 3066 // This must go outside the CALLSEQ_START..END. 3067 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3068 CallSeqStart.getNode()->getOperand(1)); 3069 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3070 NewCallSeqStart.getNode()); 3071 Chain = CallSeqStart = NewCallSeqStart; 3072 ArgOffset += PtrByteSize; 3073 } 3074 continue; 3075 } 3076 // Copy entire object into memory. There are cases where gcc-generated 3077 // code assumes it is there, even if it could be put entirely into 3078 // registers. (This is not what the doc says.) 3079 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 3080 CallSeqStart.getNode()->getOperand(0), 3081 Flags, DAG, dl); 3082 // This must go outside the CALLSEQ_START..END. 
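          // (The copy may itself be lowered to a call to memcpy, and call
          // sequences cannot nest, so it is chained in ahead of this call's
          // CALLSEQ_START node.)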
3083 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3084 CallSeqStart.getNode()->getOperand(1)); 3085 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode()); 3086 Chain = CallSeqStart = NewCallSeqStart; 3087 // And copy the pieces of it that fit into registers. 3088 for (unsigned j=0; j<Size; j+=PtrByteSize) { 3089 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 3090 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 3091 if (GPR_idx != NumGPRs) { 3092 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, NULL, 0, 3093 false, false, 0); 3094 MemOpChains.push_back(Load.getValue(1)); 3095 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3096 ArgOffset += PtrByteSize; 3097 } else { 3098 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 3099 break; 3100 } 3101 } 3102 continue; 3103 } 3104 3105 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 3106 default: llvm_unreachable("Unexpected ValueType for argument!"); 3107 case MVT::i32: 3108 case MVT::i64: 3109 if (GPR_idx != NumGPRs) { 3110 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 3111 } else { 3112 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3113 isPPC64, isTailCall, false, MemOpChains, 3114 TailCallArguments, dl); 3115 } 3116 ArgOffset += PtrByteSize; 3117 break; 3118 case MVT::f32: 3119 case MVT::f64: 3120 if (FPR_idx != NumFPRs) { 3121 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 3122 3123 if (isVarArg) { 3124 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0, 3125 false, false, 0); 3126 MemOpChains.push_back(Store); 3127 3128 // Float varargs are always shadowed in available integer registers 3129 if (GPR_idx != NumGPRs) { 3130 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0, 3131 false, false, 0); 3132 MemOpChains.push_back(Load.getValue(1)); 3133 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3134 } 3135 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 3136 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3137 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3138 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0, 3139 false, false, 0); 3140 MemOpChains.push_back(Load.getValue(1)); 3141 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3142 } 3143 } else { 3144 // If we have any FPRs remaining, we may also have GPRs remaining. 3145 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 3146 // GPRs. 3147 if (GPR_idx != NumGPRs) 3148 ++GPR_idx; 3149 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 3150 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 3151 ++GPR_idx; 3152 } 3153 } else { 3154 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3155 isPPC64, isTailCall, false, MemOpChains, 3156 TailCallArguments, dl); 3157 } 3158 if (isPPC64) 3159 ArgOffset += 8; 3160 else 3161 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 3162 break; 3163 case MVT::v4f32: 3164 case MVT::v4i32: 3165 case MVT::v8i16: 3166 case MVT::v16i8: 3167 if (isVarArg) { 3168 // These go aligned on the stack, or in the corresponding R registers 3169 // when within range. The Darwin PPC ABI doc claims they also go in 3170 // V registers; in fact gcc does this only for arguments that are 3171 // prototyped, not for those that match the ... We do it for all 3172 // arguments, seems to work. 
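        // Vector varargs are 16-byte aligned in the parameter area; the
        // padding inserted below also consumes the GPRs that shadow the
        // padding bytes.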
3173 while (ArgOffset % 16 !=0) { 3174 ArgOffset += PtrByteSize; 3175 if (GPR_idx != NumGPRs) 3176 GPR_idx++; 3177 } 3178 // We could elide this store in the case where the object fits 3179 // entirely in R registers. Maybe later. 3180 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3181 DAG.getConstant(ArgOffset, PtrVT)); 3182 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0, 3183 false, false, 0); 3184 MemOpChains.push_back(Store); 3185 if (VR_idx != NumVRs) { 3186 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, NULL, 0, 3187 false, false, 0); 3188 MemOpChains.push_back(Load.getValue(1)); 3189 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 3190 } 3191 ArgOffset += 16; 3192 for (unsigned i=0; i<16; i+=PtrByteSize) { 3193 if (GPR_idx == NumGPRs) 3194 break; 3195 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 3196 DAG.getConstant(i, PtrVT)); 3197 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, NULL, 0, 3198 false, false, 0); 3199 MemOpChains.push_back(Load.getValue(1)); 3200 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3201 } 3202 break; 3203 } 3204 3205 // Non-varargs Altivec params generally go in registers, but have 3206 // stack space allocated at the end. 3207 if (VR_idx != NumVRs) { 3208 // Doesn't have GPR space allocated. 3209 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 3210 } else if (nAltivecParamsAtEnd==0) { 3211 // We are emitting Altivec params in order. 3212 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3213 isPPC64, isTailCall, true, MemOpChains, 3214 TailCallArguments, dl); 3215 ArgOffset += 16; 3216 } 3217 break; 3218 } 3219 } 3220 // If all Altivec parameters fit in registers, as they usually do, 3221 // they get stack space following the non-Altivec parameters. We 3222 // don't track this here because nobody below needs it. 3223 // If there are more Altivec parameters than fit in registers emit 3224 // the stores here. 3225 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 3226 unsigned j = 0; 3227 // Offset is aligned; skip 1st 12 params which go in V registers. 3228 ArgOffset = ((ArgOffset+15)/16)*16; 3229 ArgOffset += 12*16; 3230 for (unsigned i = 0; i != NumOps; ++i) { 3231 SDValue Arg = Outs[i].Val; 3232 EVT ArgType = Arg.getValueType(); 3233 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 3234 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 3235 if (++j > NumVRs) { 3236 SDValue PtrOff; 3237 // We are emitting Altivec params in order. 3238 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3239 isPPC64, isTailCall, true, MemOpChains, 3240 TailCallArguments, dl); 3241 ArgOffset += 16; 3242 } 3243 } 3244 } 3245 } 3246 3247 if (!MemOpChains.empty()) 3248 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3249 &MemOpChains[0], MemOpChains.size()); 3250 3251 // Check if this is an indirect call (MTCTR/BCTRL). 3252 // See PrepareCall() for more information about calls through function 3253 // pointers in the 64-bit SVR4 ABI. 3254 if (!isTailCall && isPPC64 && PPCSubTarget.isSVR4ABI() && 3255 !dyn_cast<GlobalAddressSDNode>(Callee) && 3256 !dyn_cast<ExternalSymbolSDNode>(Callee) && 3257 !isBLACompatibleAddress(Callee, DAG)) { 3258 // Load r2 into a virtual register and store it to the TOC save area. 3259 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 3260 // TOC save area offset. 
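    // (In the 64-bit SVR4 ABI the TOC save slot is the doubleword at offset
    // 40 from the stack pointer, inside the caller's linkage area.)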
3261 SDValue PtrOff = DAG.getIntPtrConstant(40); 3262 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 3263 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, NULL, 0, 3264 false, false, 0); 3265 } 3266 3267 // On Darwin, R12 must contain the address of an indirect callee. This does 3268 // not mean the MTCTR instruction must use R12; it's easier to model this as 3269 // an extra parameter, so do that. 3270 if (!isTailCall && 3271 !dyn_cast<GlobalAddressSDNode>(Callee) && 3272 !dyn_cast<ExternalSymbolSDNode>(Callee) && 3273 !isBLACompatibleAddress(Callee, DAG)) 3274 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 3275 PPC::R12), Callee)); 3276 3277 // Build a sequence of copy-to-reg nodes chained together with token chain 3278 // and flag operands which copy the outgoing args into the appropriate regs. 3279 SDValue InFlag; 3280 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 3281 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 3282 RegsToPass[i].second, InFlag); 3283 InFlag = Chain.getValue(1); 3284 } 3285 3286 if (isTailCall) { 3287 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 3288 FPOp, true, TailCallArguments); 3289 } 3290 3291 return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, 3292 RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, 3293 Ins, InVals); 3294} 3295 3296SDValue 3297PPCTargetLowering::LowerReturn(SDValue Chain, 3298 CallingConv::ID CallConv, bool isVarArg, 3299 const SmallVectorImpl<ISD::OutputArg> &Outs, 3300 DebugLoc dl, SelectionDAG &DAG) const { 3301 3302 SmallVector<CCValAssign, 16> RVLocs; 3303 CCState CCInfo(CallConv, isVarArg, getTargetMachine(), 3304 RVLocs, *DAG.getContext()); 3305 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 3306 3307 // If this is the first return lowered for this function, add the regs to the 3308 // liveout set for the function. 3309 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { 3310 for (unsigned i = 0; i != RVLocs.size(); ++i) 3311 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); 3312 } 3313 3314 SDValue Flag; 3315 3316 // Copy the result values into the output registers. 3317 for (unsigned i = 0; i != RVLocs.size(); ++i) { 3318 CCValAssign &VA = RVLocs[i]; 3319 assert(VA.isRegLoc() && "Can only return in registers!"); 3320 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 3321 Outs[i].Val, Flag); 3322 Flag = Chain.getValue(1); 3323 } 3324 3325 if (Flag.getNode()) 3326 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag); 3327 else 3328 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain); 3329} 3330 3331SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 3332 const PPCSubtarget &Subtarget) const { 3333 // When we pop the dynamic allocation we need to restore the SP link. 3334 DebugLoc dl = Op.getDebugLoc(); 3335 3336 // Get the corect type for pointers. 3337 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3338 3339 // Construct the stack pointer operand. 3340 bool isPPC64 = Subtarget.isPPC64(); 3341 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 3342 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 3343 3344 // Get the operands for the STACKRESTORE. 3345 SDValue Chain = Op.getOperand(0); 3346 SDValue SaveSP = Op.getOperand(1); 3347 3348 // Load the old link SP. 3349 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, NULL, 0, 3350 false, false, 0); 3351 3352 // Restore the stack pointer. 
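  // The store below re-reads R1/X1 on a chain that follows this CopyToReg, so
  // the saved back-chain word lands at 0(new SP) and the back chain stays
  // intact.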
3353  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
3354
3355  // Store the old link SP.
3356  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, NULL, 0,
3357                      false, false, 0);
3358}
3359
3360
3361
3362SDValue
3363PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
3364  MachineFunction &MF = DAG.getMachineFunction();
3365  bool isPPC64 = PPCSubTarget.isPPC64();
3366  bool isDarwinABI = PPCSubTarget.isDarwinABI();
3367  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3368
3369  // Get the current return address save index. If it hasn't been created
3370  // yet, do so now and remember it.
3371  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
3372  int RASI = FI->getReturnAddrSaveIndex();
3373
3374  // If the return address save index hasn't been defined yet.
3375  if (!RASI) {
3376    // Find out the fixed offset of the return address save area.
3377    int LROffset = PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI);
3378    // Allocate the frame index for the return address save area.
3379    RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset,
3380                                                true, false);
3381    // Save the result.
3382    FI->setReturnAddrSaveIndex(RASI);
3383  }
3384  return DAG.getFrameIndex(RASI, PtrVT);
3385}
3386
3387SDValue
3388PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
3389  MachineFunction &MF = DAG.getMachineFunction();
3390  bool isPPC64 = PPCSubTarget.isPPC64();
3391  bool isDarwinABI = PPCSubTarget.isDarwinABI();
3392  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3393
3394  // Get current frame pointer save index. The users of this index will be
3395  // primarily DYNALLOC instructions.
3396  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
3397  int FPSI = FI->getFramePointerSaveIndex();
3398
3399  // If the frame pointer save index hasn't been defined yet.
3400  if (!FPSI) {
3401    // Find out the fixed offset of the frame pointer save area.
3402    int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
3403                                                           isDarwinABI);
3404
3405    // Allocate the frame index for the frame pointer save area.
3406    FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset,
3407                                                true, false);
3408    // Save the result.
3409    FI->setFramePointerSaveIndex(FPSI);
3410  }
3411  return DAG.getFrameIndex(FPSI, PtrVT);
3412}
3413
3414SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
3415                                                   SelectionDAG &DAG,
3416                                                   const PPCSubtarget &Subtarget) const {
3417  // Get the inputs.
3418  SDValue Chain = Op.getOperand(0);
3419  SDValue Size = Op.getOperand(1);
3420  DebugLoc dl = Op.getDebugLoc();
3421
3422  // Get the correct type for pointers.
3423  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3424  // Negate the size.
3425  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
3426                                DAG.getConstant(0, PtrVT), Size);
3427  // Construct a node for the frame pointer save index.
3428  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
3429  // Build a DYNALLOC node.
3430  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
3431  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
3432  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
3433}
3434
3435/// LowerSELECT_CC - Lower floating-point select_cc's into the fsel instruction
3436/// when possible.
3437SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
3438  // Not FP? Not a fsel.
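  // fsel selects its second operand when its first operand compares >= 0.0
  // and its third operand otherwise, so every condition handled below is
  // rewritten into that compare-against-zero form.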
3439 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 3440 !Op.getOperand(2).getValueType().isFloatingPoint()) 3441 return Op; 3442 3443 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 3444 3445 // Cannot handle SETEQ/SETNE. 3446 if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op; 3447 3448 EVT ResVT = Op.getValueType(); 3449 EVT CmpVT = Op.getOperand(0).getValueType(); 3450 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 3451 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); 3452 DebugLoc dl = Op.getDebugLoc(); 3453 3454 // If the RHS of the comparison is a 0.0, we don't need to do the 3455 // subtraction at all. 3456 if (isFloatingPointZero(RHS)) 3457 switch (CC) { 3458 default: break; // SETUO etc aren't handled by fsel. 3459 case ISD::SETULT: 3460 case ISD::SETLT: 3461 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 3462 case ISD::SETOGE: 3463 case ISD::SETGE: 3464 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 3465 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 3466 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV); 3467 case ISD::SETUGT: 3468 case ISD::SETGT: 3469 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt 3470 case ISD::SETOLE: 3471 case ISD::SETLE: 3472 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits 3473 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS); 3474 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 3475 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV); 3476 } 3477 3478 SDValue Cmp; 3479 switch (CC) { 3480 default: break; // SETUO etc aren't handled by fsel. 3481 case ISD::SETULT: 3482 case ISD::SETLT: 3483 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 3484 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3485 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3486 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 3487 case ISD::SETOGE: 3488 case ISD::SETGE: 3489 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 3490 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3491 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3492 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 3493 case ISD::SETUGT: 3494 case ISD::SETGT: 3495 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 3496 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3497 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3498 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 3499 case ISD::SETOLE: 3500 case ISD::SETLE: 3501 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 3502 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 3503 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 3504 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 3505 } 3506 return Op; 3507} 3508 3509// FIXME: Split this code up when LegalizeDAGTypes lands. 3510SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 3511 DebugLoc dl) const { 3512 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 3513 SDValue Src = Op.getOperand(0); 3514 if (Src.getValueType() == MVT::f32) 3515 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 3516 3517 SDValue Tmp; 3518 switch (Op.getValueType().getSimpleVT().SimpleTy) { 3519 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 3520 case MVT::i32: 3521 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIWZ : 3522 PPCISD::FCTIDZ, 3523 dl, MVT::f64, Src); 3524 break; 3525 case MVT::i64: 3526 Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src); 3527 break; 3528 } 3529 3530 // Convert the FP value to an int value through memory. 3531 SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64); 3532 3533 // Emit a store to the stack slot. 3534 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, NULL, 0, 3535 false, false, 0); 3536 3537 // Result is a load from the stack slot. If loading 4 bytes, make sure to 3538 // add in a bias. 3539 if (Op.getValueType() == MVT::i32) 3540 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 3541 DAG.getConstant(4, FIPtr.getValueType())); 3542 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, NULL, 0, 3543 false, false, 0); 3544} 3545 3546SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, 3547 SelectionDAG &DAG) const { 3548 DebugLoc dl = Op.getDebugLoc(); 3549 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 3550 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 3551 return SDValue(); 3552 3553 if (Op.getOperand(0).getValueType() == MVT::i64) { 3554 SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl, 3555 MVT::f64, Op.getOperand(0)); 3556 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits); 3557 if (Op.getValueType() == MVT::f32) 3558 FP = DAG.getNode(ISD::FP_ROUND, dl, 3559 MVT::f32, FP, DAG.getIntPtrConstant(0)); 3560 return FP; 3561 } 3562 3563 assert(Op.getOperand(0).getValueType() == MVT::i32 && 3564 "Unhandled SINT_TO_FP type in custom expander!"); 3565 // Since we only generate this in 64-bit mode, we can take advantage of 3566 // 64-bit registers. In particular, sign extend the input value into the 3567 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 3568 // then lfd it and fcfid it. 3569 MachineFunction &MF = DAG.getMachineFunction(); 3570 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 3571 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 3572 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3573 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 3574 3575 SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32, 3576 Op.getOperand(0)); 3577 3578 // STD the extended value into the stack slot. 3579 MachineMemOperand *MMO = 3580 MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx), 3581 MachineMemOperand::MOStore, 0, 8, 8); 3582 SDValue Ops[] = { DAG.getEntryNode(), Ext64, FIdx }; 3583 SDValue Store = 3584 DAG.getMemIntrinsicNode(PPCISD::STD_32, dl, DAG.getVTList(MVT::Other), 3585 Ops, 4, MVT::i64, MMO); 3586 // Load the value as a double. 3587 SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, NULL, 0, false, false, 0); 3588 3589 // FCFID it and return it. 
3590 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld); 3591 if (Op.getValueType() == MVT::f32) 3592 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0)); 3593 return FP; 3594} 3595 3596SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3597 SelectionDAG &DAG) const { 3598 DebugLoc dl = Op.getDebugLoc(); 3599 /* 3600 The rounding mode is in bits 30:31 of FPSR, and has the following 3601 settings: 3602 00 Round to nearest 3603 01 Round to 0 3604 10 Round to +inf 3605 11 Round to -inf 3606 3607 FLT_ROUNDS, on the other hand, expects the following: 3608 -1 Undefined 3609 0 Round to 0 3610 1 Round to nearest 3611 2 Round to +inf 3612 3 Round to -inf 3613 3614 To perform the conversion, we do: 3615 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1)) 3616 */ 3617 3618 MachineFunction &MF = DAG.getMachineFunction(); 3619 EVT VT = Op.getValueType(); 3620 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3621 std::vector<EVT> NodeTys; 3622 SDValue MFFSreg, InFlag; 3623 3624 // Save FP Control Word to register 3625 NodeTys.push_back(MVT::f64); // return register 3626 NodeTys.push_back(MVT::Flag); // unused in this context 3627 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 3628 3629 // Save FP register to stack slot 3630 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); 3631 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); 3632 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, 3633 StackSlot, NULL, 0, false, false, 0); 3634 3635 // Load FP Control Word from low 32 bits of stack slot. 3636 SDValue Four = DAG.getConstant(4, PtrVT); 3637 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four); 3638 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, NULL, 0, 3639 false, false, 0); 3640 3641 // Transform as necessary 3642 SDValue CWD1 = 3643 DAG.getNode(ISD::AND, dl, MVT::i32, 3644 CWD, DAG.getConstant(3, MVT::i32)); 3645 SDValue CWD2 = 3646 DAG.getNode(ISD::SRL, dl, MVT::i32, 3647 DAG.getNode(ISD::AND, dl, MVT::i32, 3648 DAG.getNode(ISD::XOR, dl, MVT::i32, 3649 CWD, DAG.getConstant(3, MVT::i32)), 3650 DAG.getConstant(3, MVT::i32)), 3651 DAG.getConstant(1, MVT::i32)); 3652 3653 SDValue RetVal = 3654 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2); 3655 3656 return DAG.getNode((VT.getSizeInBits() < 16 ? 3657 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 3658} 3659 3660SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 3661 EVT VT = Op.getValueType(); 3662 unsigned BitWidth = VT.getSizeInBits(); 3663 DebugLoc dl = Op.getDebugLoc(); 3664 assert(Op.getNumOperands() == 3 && 3665 VT == Op.getOperand(1).getValueType() && 3666 "Unexpected SHL!"); 3667 3668 // Expand into a bunch of logical ops. Note that these ops 3669 // depend on the PPC behavior for oversized shift amounts. 
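  // In particular, the PPCISD shift nodes used here produce zero when the
  // shift amount is in [BitWidth, 2*BitWidth), so exactly one of the OR'd
  // terms is nonzero whether Amt is below BitWidth or not.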
3670 SDValue Lo = Op.getOperand(0); 3671 SDValue Hi = Op.getOperand(1); 3672 SDValue Amt = Op.getOperand(2); 3673 EVT AmtVT = Amt.getValueType(); 3674 3675 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3676 DAG.getConstant(BitWidth, AmtVT), Amt); 3677 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 3678 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 3679 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 3680 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3681 DAG.getConstant(-BitWidth, AmtVT)); 3682 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 3683 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 3684 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 3685 SDValue OutOps[] = { OutLo, OutHi }; 3686 return DAG.getMergeValues(OutOps, 2, dl); 3687} 3688 3689SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 3690 EVT VT = Op.getValueType(); 3691 DebugLoc dl = Op.getDebugLoc(); 3692 unsigned BitWidth = VT.getSizeInBits(); 3693 assert(Op.getNumOperands() == 3 && 3694 VT == Op.getOperand(1).getValueType() && 3695 "Unexpected SRL!"); 3696 3697 // Expand into a bunch of logical ops. Note that these ops 3698 // depend on the PPC behavior for oversized shift amounts. 3699 SDValue Lo = Op.getOperand(0); 3700 SDValue Hi = Op.getOperand(1); 3701 SDValue Amt = Op.getOperand(2); 3702 EVT AmtVT = Amt.getValueType(); 3703 3704 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3705 DAG.getConstant(BitWidth, AmtVT), Amt); 3706 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 3707 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 3708 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 3709 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3710 DAG.getConstant(-BitWidth, AmtVT)); 3711 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 3712 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 3713 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 3714 SDValue OutOps[] = { OutLo, OutHi }; 3715 return DAG.getMergeValues(OutOps, 2, dl); 3716} 3717 3718SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 3719 DebugLoc dl = Op.getDebugLoc(); 3720 EVT VT = Op.getValueType(); 3721 unsigned BitWidth = VT.getSizeInBits(); 3722 assert(Op.getNumOperands() == 3 && 3723 VT == Op.getOperand(1).getValueType() && 3724 "Unexpected SRA!"); 3725 3726 // Expand into a bunch of logical ops, followed by a select_cc. 3727 SDValue Lo = Op.getOperand(0); 3728 SDValue Hi = Op.getOperand(1); 3729 SDValue Amt = Op.getOperand(2); 3730 EVT AmtVT = Amt.getValueType(); 3731 3732 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 3733 DAG.getConstant(BitWidth, AmtVT), Amt); 3734 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 3735 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 3736 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 3737 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 3738 DAG.getConstant(-BitWidth, AmtVT)); 3739 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 3740 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 3741 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 3742 Tmp4, Tmp6, ISD::SETLE); 3743 SDValue OutOps[] = { OutLo, OutHi }; 3744 return DAG.getMergeValues(OutOps, 2, dl); 3745} 3746 3747//===----------------------------------------------------------------------===// 3748// Vector related lowering. 
3749// 3750 3751/// BuildSplatI - Build a canonical splati of Val with an element size of 3752/// SplatSize. Cast the result to VT. 3753static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 3754 SelectionDAG &DAG, DebugLoc dl) { 3755 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 3756 3757 static const EVT VTys[] = { // canonical VT to use for each size. 3758 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 3759 }; 3760 3761 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 3762 3763 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 3764 if (Val == -1) 3765 SplatSize = 1; 3766 3767 EVT CanonicalVT = VTys[SplatSize-1]; 3768 3769 // Build a canonical splat for this value. 3770 SDValue Elt = DAG.getConstant(Val, MVT::i32); 3771 SmallVector<SDValue, 8> Ops; 3772 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 3773 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, 3774 &Ops[0], Ops.size()); 3775 return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res); 3776} 3777 3778/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 3779/// specified intrinsic ID. 3780static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 3781 SelectionDAG &DAG, DebugLoc dl, 3782 EVT DestVT = MVT::Other) { 3783 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 3784 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 3785 DAG.getConstant(IID, MVT::i32), LHS, RHS); 3786} 3787 3788/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 3789/// specified intrinsic ID. 3790static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 3791 SDValue Op2, SelectionDAG &DAG, 3792 DebugLoc dl, EVT DestVT = MVT::Other) { 3793 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 3794 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 3795 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 3796} 3797 3798 3799/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 3800/// amount. The result has the specified value type. 3801static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 3802 EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3803 // Force LHS/RHS to be the right type. 3804 LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS); 3805 RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS); 3806 3807 int Ops[16]; 3808 for (unsigned i = 0; i != 16; ++i) 3809 Ops[i] = i + Amt; 3810 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 3811 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T); 3812} 3813 3814// If this is a case we can't handle, return null and let the default 3815// expansion code take care of it. If we CAN select this case, and if it 3816// selects to a single instruction, return Op. Otherwise, if we can codegen 3817// this case more efficiently than a constant pool load, lower it to the 3818// sequence of ops that should be used. 3819SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 3820 SelectionDAG &DAG) const { 3821 DebugLoc dl = Op.getDebugLoc(); 3822 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 3823 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 3824 3825 // Check if this is a splat of a constant value. 3826 APInt APSplatBits, APSplatUndef; 3827 unsigned SplatBitSize; 3828 bool HasAnyUndefs; 3829 if (! 
BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize, 3830 HasAnyUndefs, 0, true) || SplatBitSize > 32) 3831 return SDValue(); 3832 3833 unsigned SplatBits = APSplatBits.getZExtValue(); 3834 unsigned SplatUndef = APSplatUndef.getZExtValue(); 3835 unsigned SplatSize = SplatBitSize / 8; 3836 3837 // First, handle single instruction cases. 3838 3839 // All zeros? 3840 if (SplatBits == 0) { 3841 // Canonicalize all zero vectors to be v4i32. 3842 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { 3843 SDValue Z = DAG.getConstant(0, MVT::i32); 3844 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); 3845 Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z); 3846 } 3847 return Op; 3848 } 3849 3850 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw]. 3851 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >> 3852 (32-SplatBitSize)); 3853 if (SextVal >= -16 && SextVal <= 15) 3854 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl); 3855 3856 3857 // Two instruction sequences. 3858 3859 // If this value is in the range [-32,30] and is even, use: 3860 // tmp = VSPLTI[bhw], result = add tmp, tmp 3861 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { 3862 SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl); 3863 Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res); 3864 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3865 } 3866 3867 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is 3868 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important 3869 // for fneg/fabs. 3870 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { 3871 // Make -1 and vspltisw -1: 3872 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl); 3873 3874 // Make the VSLW intrinsic, computing 0x8000_0000. 3875 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 3876 OnesV, DAG, dl); 3877 3878 // xor by OnesV to invert it. 3879 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); 3880 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3881 } 3882 3883 // Check to see if this is a wide variety of vsplti*, binop self cases. 3884 static const signed char SplatCsts[] = { 3885 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7, 3886 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16 3887 }; 3888 3889 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) { 3890 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for 3891 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1' 3892 int i = SplatCsts[idx]; 3893 3894 // Figure out what shift amount will be used by altivec if shifted by i in 3895 // this splat size. 3896 unsigned TypeShiftAmt = i & (SplatBitSize-1); 3897 3898 // vsplti + shl self. 3899 if (SextVal == (i << (int)TypeShiftAmt)) { 3900 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 3901 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3902 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, 3903 Intrinsic::ppc_altivec_vslw 3904 }; 3905 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 3906 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3907 } 3908 3909 // vsplti + srl self. 3910 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 3911 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 3912 static const unsigned IIDs[] = { // Intrinsic to use for each size. 
3913 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, 3914 Intrinsic::ppc_altivec_vsrw 3915 }; 3916 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 3917 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3918 } 3919 3920 // vsplti + sra self. 3921 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { 3922 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 3923 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3924 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, 3925 Intrinsic::ppc_altivec_vsraw 3926 }; 3927 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 3928 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3929 } 3930 3931 // vsplti + rol self. 3932 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 3933 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 3934 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 3935 static const unsigned IIDs[] = { // Intrinsic to use for each size. 3936 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 3937 Intrinsic::ppc_altivec_vrlw 3938 }; 3939 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 3940 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); 3941 } 3942 3943 // t = vsplti c, result = vsldoi t, t, 1 3944 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) { 3945 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 3946 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); 3947 } 3948 // t = vsplti c, result = vsldoi t, t, 2 3949 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) { 3950 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 3951 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); 3952 } 3953 // t = vsplti c, result = vsldoi t, t, 3 3954 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) { 3955 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 3956 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); 3957 } 3958 } 3959 3960 // Three instruction sequences. 3961 3962 // Odd, in range [17,31]: (vsplti C)-(vsplti -16). 3963 if (SextVal >= 0 && SextVal <= 31) { 3964 SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl); 3965 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); 3966 LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS); 3967 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS); 3968 } 3969 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). 3970 if (SextVal >= -31 && SextVal <= 0) { 3971 SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl); 3972 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); 3973 LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS); 3974 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS); 3975 } 3976 3977 return SDValue(); 3978} 3979 3980/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 3981/// the specified operations to build the shuffle. 
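/// Each 32-bit table entry packs a cost in bits 31:30, an opcode in bits
/// 29:26, and two 13-bit operand indices that recursively name the table
/// entries used to build the left-hand and right-hand inputs.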
3982static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 3983 SDValue RHS, SelectionDAG &DAG, 3984 DebugLoc dl) { 3985 unsigned OpNum = (PFEntry >> 26) & 0x0F; 3986 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 3987 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 3988 3989 enum { 3990 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 3991 OP_VMRGHW, 3992 OP_VMRGLW, 3993 OP_VSPLTISW0, 3994 OP_VSPLTISW1, 3995 OP_VSPLTISW2, 3996 OP_VSPLTISW3, 3997 OP_VSLDOI4, 3998 OP_VSLDOI8, 3999 OP_VSLDOI12 4000 }; 4001 4002 if (OpNum == OP_COPY) { 4003 if (LHSID == (1*9+2)*9+3) return LHS; 4004 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4005 return RHS; 4006 } 4007 4008 SDValue OpLHS, OpRHS; 4009 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4010 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4011 4012 int ShufIdxs[16]; 4013 switch (OpNum) { 4014 default: llvm_unreachable("Unknown i32 permute!"); 4015 case OP_VMRGHW: 4016 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 4017 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 4018 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 4019 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 4020 break; 4021 case OP_VMRGLW: 4022 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 4023 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 4024 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 4025 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 4026 break; 4027 case OP_VSPLTISW0: 4028 for (unsigned i = 0; i != 16; ++i) 4029 ShufIdxs[i] = (i&3)+0; 4030 break; 4031 case OP_VSPLTISW1: 4032 for (unsigned i = 0; i != 16; ++i) 4033 ShufIdxs[i] = (i&3)+4; 4034 break; 4035 case OP_VSPLTISW2: 4036 for (unsigned i = 0; i != 16; ++i) 4037 ShufIdxs[i] = (i&3)+8; 4038 break; 4039 case OP_VSPLTISW3: 4040 for (unsigned i = 0; i != 16; ++i) 4041 ShufIdxs[i] = (i&3)+12; 4042 break; 4043 case OP_VSLDOI4: 4044 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 4045 case OP_VSLDOI8: 4046 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 4047 case OP_VSLDOI12: 4048 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 4049 } 4050 EVT VT = OpLHS.getValueType(); 4051 OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpLHS); 4052 OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpRHS); 4053 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 4054 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T); 4055} 4056 4057/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 4058/// is a shuffle we can handle in a single instruction, return it. Otherwise, 4059/// return the code it can be lowered into. Worst case, it can always be 4060/// lowered into a vperm. 4061SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 4062 SelectionDAG &DAG) const { 4063 DebugLoc dl = Op.getDebugLoc(); 4064 SDValue V1 = Op.getOperand(0); 4065 SDValue V2 = Op.getOperand(1); 4066 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 4067 EVT VT = Op.getValueType(); 4068 4069 // Cases that are handled by instructions that take permute immediates 4070 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 4071 // selected by the instruction selector. 
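  // With an undef second operand the single-instruction forms are the splats
  // (vsplt[bhw]), the pack and merge shuffles (vpku[hw]um, vmrg[lh][bhw]) and
  // vsldoi; the checks below mirror the corresponding selection patterns.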
4072 if (V2.getOpcode() == ISD::UNDEF) { 4073 if (PPC::isSplatShuffleMask(SVOp, 1) || 4074 PPC::isSplatShuffleMask(SVOp, 2) || 4075 PPC::isSplatShuffleMask(SVOp, 4) || 4076 PPC::isVPKUWUMShuffleMask(SVOp, true) || 4077 PPC::isVPKUHUMShuffleMask(SVOp, true) || 4078 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 || 4079 PPC::isVMRGLShuffleMask(SVOp, 1, true) || 4080 PPC::isVMRGLShuffleMask(SVOp, 2, true) || 4081 PPC::isVMRGLShuffleMask(SVOp, 4, true) || 4082 PPC::isVMRGHShuffleMask(SVOp, 1, true) || 4083 PPC::isVMRGHShuffleMask(SVOp, 2, true) || 4084 PPC::isVMRGHShuffleMask(SVOp, 4, true)) { 4085 return Op; 4086 } 4087 } 4088 4089 // Altivec has a variety of "shuffle immediates" that take two vector inputs 4090 // and produce a fixed permutation. If any of these match, do not lower to 4091 // VPERM. 4092 if (PPC::isVPKUWUMShuffleMask(SVOp, false) || 4093 PPC::isVPKUHUMShuffleMask(SVOp, false) || 4094 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 || 4095 PPC::isVMRGLShuffleMask(SVOp, 1, false) || 4096 PPC::isVMRGLShuffleMask(SVOp, 2, false) || 4097 PPC::isVMRGLShuffleMask(SVOp, 4, false) || 4098 PPC::isVMRGHShuffleMask(SVOp, 1, false) || 4099 PPC::isVMRGHShuffleMask(SVOp, 2, false) || 4100 PPC::isVMRGHShuffleMask(SVOp, 4, false)) 4101 return Op; 4102 4103 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 4104 // perfect shuffle table to emit an optimal matching sequence. 4105 SmallVector<int, 16> PermMask; 4106 SVOp->getMask(PermMask); 4107 4108 unsigned PFIndexes[4]; 4109 bool isFourElementShuffle = true; 4110 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 4111 unsigned EltNo = 8; // Start out undef. 4112 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 4113 if (PermMask[i*4+j] < 0) 4114 continue; // Undef, ignore it. 4115 4116 unsigned ByteSource = PermMask[i*4+j]; 4117 if ((ByteSource & 3) != j) { 4118 isFourElementShuffle = false; 4119 break; 4120 } 4121 4122 if (EltNo == 8) { 4123 EltNo = ByteSource/4; 4124 } else if (EltNo != ByteSource/4) { 4125 isFourElementShuffle = false; 4126 break; 4127 } 4128 } 4129 PFIndexes[i] = EltNo; 4130 } 4131 4132 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 4133 // perfect shuffle vector to determine if it is cost effective to do this as 4134 // discrete instructions, or whether we should use a vperm. 4135 if (isFourElementShuffle) { 4136 // Compute the index in the perfect shuffle table. 4137 unsigned PFTableIndex = 4138 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4139 4140 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4141 unsigned Cost = (PFEntry >> 30); 4142 4143 // Determining when to avoid vperm is tricky. Many things affect the cost 4144 // of vperm, particularly how many times the perm mask needs to be computed. 4145 // For example, if the perm mask can be hoisted out of a loop or is already 4146 // used (perhaps because there are multiple permutes with the same shuffle 4147 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 4148 // the loop requires an extra register. 4149 // 4150 // As a compromise, we only emit discrete instructions if the shuffle can be 4151 // generated in 3 or fewer operations. When we have loop information 4152 // available, if this block is within a loop, we should avoid using vperm 4153 // for 3-operation perms and use a constant pool load instead. 
4154 if (Cost < 3) 4155 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 4156 } 4157 4158 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 4159 // vector that will get spilled to the constant pool. 4160 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 4161 4162 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 4163 // that it is in input element units, not in bytes. Convert now. 4164 EVT EltVT = V1.getValueType().getVectorElementType(); 4165 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 4166 4167 SmallVector<SDValue, 16> ResultMask; 4168 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 4169 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 4170 4171 for (unsigned j = 0; j != BytesPerElement; ++j) 4172 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 4173 MVT::i32)); 4174 } 4175 4176 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 4177 &ResultMask[0], ResultMask.size()); 4178 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); 4179} 4180 4181/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 4182/// altivec comparison. If it is, return true and fill in Opc/isDot with 4183/// information about the intrinsic. 4184static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 4185 bool &isDot) { 4186 unsigned IntrinsicID = 4187 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 4188 CompareOpc = -1; 4189 isDot = false; 4190 switch (IntrinsicID) { 4191 default: return false; 4192 // Comparison predicates. 4193 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 4194 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 4195 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 4196 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 4197 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 4198 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 4199 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 4200 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 4201 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 4202 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 4203 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 4204 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 4205 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 4206 4207 // Normal Comparisons. 
4208 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 4209 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 4210 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 4211 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 4212 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 4213 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 4214 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 4215 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 4216 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 4217 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 4218 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 4219 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 4220 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 4221 } 4222 return true; 4223} 4224 4225/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 4226/// lower, do it, otherwise return null. 4227SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 4228 SelectionDAG &DAG) const { 4229 // If this is a lowered altivec predicate compare, CompareOpc is set to the 4230 // opcode number of the comparison. 4231 DebugLoc dl = Op.getDebugLoc(); 4232 int CompareOpc; 4233 bool isDot; 4234 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 4235 return SDValue(); // Don't custom lower most intrinsics. 4236 4237 // If this is a non-dot comparison, make the VCMP node and we are done. 4238 if (!isDot) { 4239 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 4240 Op.getOperand(1), Op.getOperand(2), 4241 DAG.getConstant(CompareOpc, MVT::i32)); 4242 return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp); 4243 } 4244 4245 // Create the PPCISD altivec 'dot' comparison node. 4246 SDValue Ops[] = { 4247 Op.getOperand(2), // LHS 4248 Op.getOperand(3), // RHS 4249 DAG.getConstant(CompareOpc, MVT::i32) 4250 }; 4251 std::vector<EVT> VTs; 4252 VTs.push_back(Op.getOperand(2).getValueType()); 4253 VTs.push_back(MVT::Flag); 4254 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 4255 4256 // Now that we have the comparison, emit a copy from the CR to a GPR. 4257 // This is flagged to the above dot comparison. 4258 SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32, 4259 DAG.getRegister(PPC::CR6, MVT::i32), 4260 CompNode.getValue(1)); 4261 4262 // Unpack the result based on how the target uses it. 4263 unsigned BitNo; // Bit # of CR6. 4264 bool InvertBit; // Invert result? 4265 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 4266 default: // Can't happen, don't crash on invalid number though. 4267 case 0: // Return the value of the EQ bit of CR6. 4268 BitNo = 0; InvertBit = false; 4269 break; 4270 case 1: // Return the inverted value of the EQ bit of CR6. 4271 BitNo = 0; InvertBit = true; 4272 break; 4273 case 2: // Return the value of the LT bit of CR6. 4274 BitNo = 2; InvertBit = false; 4275 break; 4276 case 3: // Return the inverted value of the LT bit of CR6. 4277 BitNo = 2; InvertBit = true; 4278 break; 4279 } 4280 4281 // Shift the bit into the low position. 4282 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 4283 DAG.getConstant(8-(3-BitNo), MVT::i32)); 4284 // Isolate the bit. 
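  // (The 8-(3-BitNo) shift amount above works because CR6's LT and EQ bits
  // are CR bits 24 and 26, which MFCR leaves at bit positions 7 and 5 of the
  // GPR.)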
4285 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 4286 DAG.getConstant(1, MVT::i32)); 4287 4288 // If we are supposed to, toggle the bit. 4289 if (InvertBit) 4290 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 4291 DAG.getConstant(1, MVT::i32)); 4292 return Flags; 4293} 4294 4295SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 4296 SelectionDAG &DAG) const { 4297 DebugLoc dl = Op.getDebugLoc(); 4298 // Create a stack slot that is 16-byte aligned. 4299 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 4300 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 4301 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4302 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4303 4304 // Store the input value into Value#0 of the stack slot. 4305 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 4306 Op.getOperand(0), FIdx, NULL, 0, 4307 false, false, 0); 4308 // Load it out. 4309 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, NULL, 0, 4310 false, false, 0); 4311} 4312 4313SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 4314 DebugLoc dl = Op.getDebugLoc(); 4315 if (Op.getValueType() == MVT::v4i32) { 4316 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4317 4318 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 4319 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 4320 4321 SDValue RHSSwap = // = vrlw RHS, 16 4322 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 4323 4324 // Shrinkify inputs to v8i16. 4325 LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS); 4326 RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS); 4327 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap); 4328 4329 // Low parts multiplied together, generating 32-bit results (we ignore the 4330 // top parts). 4331 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 4332 LHS, RHS, DAG, dl, MVT::v4i32); 4333 4334 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 4335 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 4336 // Shift the high parts up 16 bits. 4337 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 4338 Neg16, DAG, dl); 4339 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 4340 } else if (Op.getValueType() == MVT::v8i16) { 4341 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4342 4343 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 4344 4345 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 4346 LHS, RHS, Zero, DAG, dl); 4347 } else if (Op.getValueType() == MVT::v16i8) { 4348 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 4349 4350 // Multiply the even 8-bit parts, producing 16-bit sums. 4351 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 4352 LHS, RHS, DAG, dl, MVT::v8i16); 4353 EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts); 4354 4355 // Multiply the odd 8-bit parts, producing 16-bit sums. 4356 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 4357 LHS, RHS, DAG, dl, MVT::v8i16); 4358 OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts); 4359 4360 // Merge the results together. 
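    // Each halfword of EvenParts/OddParts holds a 16-bit product whose
    // truncated low byte is the odd-numbered byte in big-endian order, so the
    // shuffle below interleaves those bytes to form the 16 byte-sized
    // products.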
4361 int Ops[16]; 4362 for (unsigned i = 0; i != 8; ++i) { 4363 Ops[i*2 ] = 2*i+1; 4364 Ops[i*2+1] = 2*i+1+16; 4365 } 4366 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 4367 } else { 4368 llvm_unreachable("Unknown mul to lower!"); 4369 } 4370} 4371 4372/// LowerOperation - Provide custom lowering hooks for some operations. 4373/// 4374SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 4375 switch (Op.getOpcode()) { 4376 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 4377 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 4378 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 4379 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 4380 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 4381 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 4382 case ISD::SETCC: return LowerSETCC(Op, DAG); 4383 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG); 4384 case ISD::VASTART: 4385 return LowerVASTART(Op, DAG, PPCSubTarget); 4386 4387 case ISD::VAARG: 4388 return LowerVAARG(Op, DAG, PPCSubTarget); 4389 4390 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 4391 case ISD::DYNAMIC_STACKALLOC: 4392 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 4393 4394 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 4395 case ISD::FP_TO_UINT: 4396 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 4397 Op.getDebugLoc()); 4398 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 4399 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 4400 4401 // Lower 64-bit shifts. 4402 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 4403 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 4404 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 4405 4406 // Vector-related lowering. 4407 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 4408 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 4409 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 4410 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 4411 case ISD::MUL: return LowerMUL(Op, DAG); 4412 4413 // Frame & Return address. 4414 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 4415 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 4416 } 4417 return SDValue(); 4418} 4419 4420void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 4421 SmallVectorImpl<SDValue>&Results, 4422 SelectionDAG &DAG) const { 4423 DebugLoc dl = N->getDebugLoc(); 4424 switch (N->getOpcode()) { 4425 default: 4426 assert(false && "Do not know how to custom type legalize this operation!"); 4427 return; 4428 case ISD::FP_ROUND_INREG: { 4429 assert(N->getValueType(0) == MVT::ppcf128); 4430 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 4431 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 4432 MVT::f64, N->getOperand(0), 4433 DAG.getIntPtrConstant(0)); 4434 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 4435 MVT::f64, N->getOperand(0), 4436 DAG.getIntPtrConstant(1)); 4437 4438 // This sequence changes FPSCR to do round-to-zero, adds the two halves 4439 // of the long double, and puts FPSCR back the way it was. We do not 4440 // actually model FPSCR. 
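    // The sequence emitted below is: mffs (save FPSCR), mtfsb1 31 and
    // mtfsb0 30 (set the RN field to 0b01, round toward zero), faddrtz on the
    // two halves, then mtfsf to restore the saved FPSCR.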
4441 std::vector<EVT> NodeTys; 4442 SDValue Ops[4], Result, MFFSreg, InFlag, FPreg; 4443 4444 NodeTys.push_back(MVT::f64); // Return register 4445 NodeTys.push_back(MVT::Flag); // Returns a flag for later insns 4446 Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0); 4447 MFFSreg = Result.getValue(0); 4448 InFlag = Result.getValue(1); 4449 4450 NodeTys.clear(); 4451 NodeTys.push_back(MVT::Flag); // Returns a flag 4452 Ops[0] = DAG.getConstant(31, MVT::i32); 4453 Ops[1] = InFlag; 4454 Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2); 4455 InFlag = Result.getValue(0); 4456 4457 NodeTys.clear(); 4458 NodeTys.push_back(MVT::Flag); // Returns a flag 4459 Ops[0] = DAG.getConstant(30, MVT::i32); 4460 Ops[1] = InFlag; 4461 Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2); 4462 InFlag = Result.getValue(0); 4463 4464 NodeTys.clear(); 4465 NodeTys.push_back(MVT::f64); // result of add 4466 NodeTys.push_back(MVT::Flag); // Returns a flag 4467 Ops[0] = Lo; 4468 Ops[1] = Hi; 4469 Ops[2] = InFlag; 4470 Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3); 4471 FPreg = Result.getValue(0); 4472 InFlag = Result.getValue(1); 4473 4474 NodeTys.clear(); 4475 NodeTys.push_back(MVT::f64); 4476 Ops[0] = DAG.getConstant(1, MVT::i32); 4477 Ops[1] = MFFSreg; 4478 Ops[2] = FPreg; 4479 Ops[3] = InFlag; 4480 Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4); 4481 FPreg = Result.getValue(0); 4482 4483 // We know the low half is about to be thrown away, so just use something 4484 // convenient. 4485 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 4486 FPreg, FPreg)); 4487 return; 4488 } 4489 case ISD::FP_TO_SINT: 4490 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 4491 return; 4492 } 4493} 4494 4495 4496//===----------------------------------------------------------------------===// 4497// Other Lowering Code 4498//===----------------------------------------------------------------------===// 4499 4500MachineBasicBlock * 4501PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 4502 bool is64bit, unsigned BinOpcode) const { 4503 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4504 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4505 4506 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4507 MachineFunction *F = BB->getParent(); 4508 MachineFunction::iterator It = BB; 4509 ++It; 4510 4511 unsigned dest = MI->getOperand(0).getReg(); 4512 unsigned ptrA = MI->getOperand(1).getReg(); 4513 unsigned ptrB = MI->getOperand(2).getReg(); 4514 unsigned incr = MI->getOperand(3).getReg(); 4515 DebugLoc dl = MI->getDebugLoc(); 4516 4517 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 4518 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4519 F->insert(It, loopMBB); 4520 F->insert(It, exitMBB); 4521 exitMBB->transferSuccessors(BB); 4522 4523 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4524 unsigned TmpReg = (!BinOpcode) ? incr : 4525 RegInfo.createVirtualRegister( 4526 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 4527 (const TargetRegisterClass *) &PPC::GPRCRegClass); 4528 4529 // thisMBB: 4530 // ... 4531 // fallthrough --> loopMBB 4532 BB->addSuccessor(loopMBB); 4533 4534 // loopMBB: 4535 // l[wd]arx dest, ptr 4536 // add r0, dest, incr 4537 // st[wd]cx. r0, ptr 4538 // bne- loopMBB 4539 // fallthrough --> exitMBB 4540 BB = loopMBB; 4541 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::LDARX : PPC::LWARX), dest) 4542 .addReg(ptrA).addReg(ptrB); 4543 if (BinOpcode) 4544 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 4545 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4546 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 4547 BuildMI(BB, dl, TII->get(PPC::BCC)) 4548 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 4549 BB->addSuccessor(loopMBB); 4550 BB->addSuccessor(exitMBB); 4551 4552 // exitMBB: 4553 // ... 4554 BB = exitMBB; 4555 return BB; 4556} 4557 4558MachineBasicBlock * 4559PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 4560 MachineBasicBlock *BB, 4561 bool is8bit, // operation 4562 unsigned BinOpcode) const { 4563 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 4564 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4565 // In 64 bit mode we have to use 64 bits for addresses, even though the 4566 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 4567 // registers without caring whether they're 32 or 64, but here we're 4568 // doing actual arithmetic on the addresses. 4569 bool is64bit = PPCSubTarget.isPPC64(); 4570 4571 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4572 MachineFunction *F = BB->getParent(); 4573 MachineFunction::iterator It = BB; 4574 ++It; 4575 4576 unsigned dest = MI->getOperand(0).getReg(); 4577 unsigned ptrA = MI->getOperand(1).getReg(); 4578 unsigned ptrB = MI->getOperand(2).getReg(); 4579 unsigned incr = MI->getOperand(3).getReg(); 4580 DebugLoc dl = MI->getDebugLoc(); 4581 4582 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 4583 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4584 F->insert(It, loopMBB); 4585 F->insert(It, exitMBB); 4586 exitMBB->transferSuccessors(BB); 4587 4588 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4589 const TargetRegisterClass *RC = 4590 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 4591 (const TargetRegisterClass *) &PPC::GPRCRegClass; 4592 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 4593 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 4594 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 4595 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 4596 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 4597 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 4598 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 4599 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 4600 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 4601 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 4602 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 4603 unsigned Ptr1Reg; 4604 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 4605 4606 // thisMBB: 4607 // ... 4608 // fallthrough --> loopMBB 4609 BB->addSuccessor(loopMBB); 4610 4611 // The 4-byte load must be aligned, while a char or short may be 4612 // anywhere in the word. Hence all this nasty bookkeeping code. 4613 // add ptr1, ptrA, ptrB [copy if ptrA==0] 4614 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 4615 // xori shift, shift1, 24 [16] 4616 // rlwinm ptr, ptr1, 0, 0, 29 4617 // slw incr2, incr, shift 4618 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 4619 // slw mask, mask2, shift 4620 // loopMBB: 4621 // lwarx tmpDest, ptr 4622 // add tmp, tmpDest, incr2 4623 // andc tmp2, tmpDest, mask 4624 // and tmp3, tmp, mask 4625 // or tmp4, tmp3, tmp2 4626 // stwcx. 
tmp4, ptr 4627 // bne- loopMBB 4628 // fallthrough --> exitMBB 4629 // srw dest, tmpDest, shift 4630 4631 if (ptrA!=PPC::R0) { 4632 Ptr1Reg = RegInfo.createVirtualRegister(RC); 4633 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 4634 .addReg(ptrA).addReg(ptrB); 4635 } else { 4636 Ptr1Reg = ptrB; 4637 } 4638 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 4639 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 4640 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 4641 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 4642 if (is64bit) 4643 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 4644 .addReg(Ptr1Reg).addImm(0).addImm(61); 4645 else 4646 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 4647 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 4648 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 4649 .addReg(incr).addReg(ShiftReg); 4650 if (is8bit) 4651 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 4652 else { 4653 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 4654 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 4655 } 4656 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 4657 .addReg(Mask2Reg).addReg(ShiftReg); 4658 4659 BB = loopMBB; 4660 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 4661 .addReg(PPC::R0).addReg(PtrReg); 4662 if (BinOpcode) 4663 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 4664 .addReg(Incr2Reg).addReg(TmpDestReg); 4665 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 4666 .addReg(TmpDestReg).addReg(MaskReg); 4667 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 4668 .addReg(TmpReg).addReg(MaskReg); 4669 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg) 4670 .addReg(Tmp3Reg).addReg(Tmp2Reg); 4671 BuildMI(BB, dl, TII->get(PPC::STWCX)) 4672 .addReg(Tmp4Reg).addReg(PPC::R0).addReg(PtrReg); 4673 BuildMI(BB, dl, TII->get(PPC::BCC)) 4674 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 4675 BB->addSuccessor(loopMBB); 4676 BB->addSuccessor(exitMBB); 4677 4678 // exitMBB: 4679 // ... 4680 BB = exitMBB; 4681 BuildMI(BB, dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg).addReg(ShiftReg); 4682 return BB; 4683} 4684 4685MachineBasicBlock * 4686PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 4687 MachineBasicBlock *BB, 4688 DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const { 4689 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 4690 4691 // To "insert" these instructions we actually have to insert their 4692 // control-flow patterns. 4693 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4694 MachineFunction::iterator It = BB; 4695 ++It; 4696 4697 MachineFunction *F = BB->getParent(); 4698 4699 if (MI->getOpcode() == PPC::SELECT_CC_I4 || 4700 MI->getOpcode() == PPC::SELECT_CC_I8 || 4701 MI->getOpcode() == PPC::SELECT_CC_F4 || 4702 MI->getOpcode() == PPC::SELECT_CC_F8 || 4703 MI->getOpcode() == PPC::SELECT_CC_VRRC) { 4704 4705 // The incoming instruction knows the destination vreg to set, the 4706 // condition code register to branch on, the true/false values to 4707 // select between, and a branch opcode to use. 4708 4709 // thisMBB: 4710 // ... 4711 // TrueVal = ... 
4712 // cmpTY ccX, r1, r2 4713 // bCC copy1MBB 4714 // fallthrough --> copy0MBB 4715 MachineBasicBlock *thisMBB = BB; 4716 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 4717 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 4718 unsigned SelectPred = MI->getOperand(4).getImm(); 4719 DebugLoc dl = MI->getDebugLoc(); 4720 BuildMI(BB, dl, TII->get(PPC::BCC)) 4721 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 4722 F->insert(It, copy0MBB); 4723 F->insert(It, sinkMBB); 4724 // Update machine-CFG edges by first adding all successors of the current 4725 // block to the new block which will contain the Phi node for the select. 4726 // Also inform sdisel of the edge changes. 4727 for (MachineBasicBlock::succ_iterator I = BB->succ_begin(), 4728 E = BB->succ_end(); I != E; ++I) { 4729 EM->insert(std::make_pair(*I, sinkMBB)); 4730 sinkMBB->addSuccessor(*I); 4731 } 4732 // Next, remove all successors of the current block, and add the true 4733 // and fallthrough blocks as its successors. 4734 while (!BB->succ_empty()) 4735 BB->removeSuccessor(BB->succ_begin()); 4736 // Next, add the true and fallthrough blocks as its successors. 4737 BB->addSuccessor(copy0MBB); 4738 BB->addSuccessor(sinkMBB); 4739 4740 // copy0MBB: 4741 // %FalseValue = ... 4742 // # fallthrough to sinkMBB 4743 BB = copy0MBB; 4744 4745 // Update machine-CFG edges 4746 BB->addSuccessor(sinkMBB); 4747 4748 // sinkMBB: 4749 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 4750 // ... 4751 BB = sinkMBB; 4752 BuildMI(BB, dl, TII->get(PPC::PHI), MI->getOperand(0).getReg()) 4753 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 4754 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 4755 } 4756 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 4757 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 4758 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 4759 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 4760 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 4761 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 4762 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 4763 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 4764 4765 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 4766 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 4767 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 4768 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 4769 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 4770 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 4771 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 4772 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 4773 4774 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 4775 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 4776 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 4777 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 4778 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 4779 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 4780 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 4781 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 4782 4783 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 4784 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 4785 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 4786 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 4787 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 4788 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 4789 else if (MI->getOpcode() == 
PPC::ATOMIC_LOAD_XOR_I64) 4790 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 4791 4792 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 4793 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 4794 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 4795 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 4796 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 4797 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC); 4798 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 4799 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 4800 4801 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 4802 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 4803 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 4804 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 4805 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 4806 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 4807 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 4808 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 4809 4810 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 4811 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 4812 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 4813 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 4814 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 4815 BB = EmitAtomicBinary(MI, BB, false, 0); 4816 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 4817 BB = EmitAtomicBinary(MI, BB, true, 0); 4818 4819 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 4820 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 4821 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 4822 4823 unsigned dest = MI->getOperand(0).getReg(); 4824 unsigned ptrA = MI->getOperand(1).getReg(); 4825 unsigned ptrB = MI->getOperand(2).getReg(); 4826 unsigned oldval = MI->getOperand(3).getReg(); 4827 unsigned newval = MI->getOperand(4).getReg(); 4828 DebugLoc dl = MI->getDebugLoc(); 4829 4830 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 4831 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 4832 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 4833 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4834 F->insert(It, loop1MBB); 4835 F->insert(It, loop2MBB); 4836 F->insert(It, midMBB); 4837 F->insert(It, exitMBB); 4838 exitMBB->transferSuccessors(BB); 4839 4840 // thisMBB: 4841 // ... 4842 // fallthrough --> loopMBB 4843 BB->addSuccessor(loop1MBB); 4844 4845 // loop1MBB: 4846 // l[wd]arx dest, ptr 4847 // cmp[wd] dest, oldval 4848 // bne- midMBB 4849 // loop2MBB: 4850 // st[wd]cx. newval, ptr 4851 // bne- loopMBB 4852 // b exitBB 4853 // midMBB: 4854 // st[wd]cx. dest, ptr 4855 // exitBB: 4856 BB = loop1MBB; 4857 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 4858 .addReg(ptrA).addReg(ptrB); 4859 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 4860 .addReg(oldval).addReg(dest); 4861 BuildMI(BB, dl, TII->get(PPC::BCC)) 4862 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 4863 BB->addSuccessor(loop2MBB); 4864 BB->addSuccessor(midMBB); 4865 4866 BB = loop2MBB; 4867 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 4868 .addReg(newval).addReg(ptrA).addReg(ptrB); 4869 BuildMI(BB, dl, TII->get(PPC::BCC)) 4870 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 4871 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 4872 BB->addSuccessor(loop1MBB); 4873 BB->addSuccessor(exitMBB); 4874 4875 BB = midMBB; 4876 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::STDCX : PPC::STWCX)) 4877 .addReg(dest).addReg(ptrA).addReg(ptrB); 4878 BB->addSuccessor(exitMBB); 4879 4880 // exitMBB: 4881 // ... 4882 BB = exitMBB; 4883 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 4884 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 4885 // We must use 64-bit registers for addresses when targeting 64-bit, 4886 // since we're actually doing arithmetic on them. Other registers 4887 // can be 32-bit. 4888 bool is64bit = PPCSubTarget.isPPC64(); 4889 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 4890 4891 unsigned dest = MI->getOperand(0).getReg(); 4892 unsigned ptrA = MI->getOperand(1).getReg(); 4893 unsigned ptrB = MI->getOperand(2).getReg(); 4894 unsigned oldval = MI->getOperand(3).getReg(); 4895 unsigned newval = MI->getOperand(4).getReg(); 4896 DebugLoc dl = MI->getDebugLoc(); 4897 4898 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 4899 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 4900 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 4901 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 4902 F->insert(It, loop1MBB); 4903 F->insert(It, loop2MBB); 4904 F->insert(It, midMBB); 4905 F->insert(It, exitMBB); 4906 exitMBB->transferSuccessors(BB); 4907 4908 MachineRegisterInfo &RegInfo = F->getRegInfo(); 4909 const TargetRegisterClass *RC = 4910 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 4911 (const TargetRegisterClass *) &PPC::GPRCRegClass; 4912 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 4913 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 4914 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 4915 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 4916 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 4917 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 4918 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 4919 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 4920 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 4921 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 4922 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 4923 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 4924 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 4925 unsigned Ptr1Reg; 4926 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 4927 // thisMBB: 4928 // ... 4929 // fallthrough --> loopMBB 4930 BB->addSuccessor(loop1MBB); 4931 4932 // The 4-byte load must be aligned, while a char or short may be 4933 // anywhere in the word. Hence all this nasty bookkeeping code. 4934 // add ptr1, ptrA, ptrB [copy if ptrA==0] 4935 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 4936 // xori shift, shift1, 24 [16] 4937 // rlwinm ptr, ptr1, 0, 0, 29 4938 // slw newval2, newval, shift 4939 // slw oldval2, oldval,shift 4940 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 4941 // slw mask, mask2, shift 4942 // and newval3, newval2, mask 4943 // and oldval3, oldval2, mask 4944 // loop1MBB: 4945 // lwarx tmpDest, ptr 4946 // and tmp, tmpDest, mask 4947 // cmpw tmp, oldval3 4948 // bne- midMBB 4949 // loop2MBB: 4950 // andc tmp2, tmpDest, mask 4951 // or tmp4, tmp2, newval3 4952 // stwcx. tmp4, ptr 4953 // bne- loop1MBB 4954 // b exitBB 4955 // midMBB: 4956 // stwcx. tmpDest, ptr 4957 // exitBB: 4958 // srw dest, tmpDest, shift 4959 if (ptrA!=PPC::R0) { 4960 Ptr1Reg = RegInfo.createVirtualRegister(RC); 4961 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::ADD8 : PPC::ADD4), Ptr1Reg) 4962 .addReg(ptrA).addReg(ptrB); 4963 } else { 4964 Ptr1Reg = ptrB; 4965 } 4966 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 4967 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 4968 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 4969 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 4970 if (is64bit) 4971 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 4972 .addReg(Ptr1Reg).addImm(0).addImm(61); 4973 else 4974 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 4975 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 4976 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 4977 .addReg(newval).addReg(ShiftReg); 4978 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 4979 .addReg(oldval).addReg(ShiftReg); 4980 if (is8bit) 4981 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 4982 else { 4983 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 4984 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 4985 .addReg(Mask3Reg).addImm(65535); 4986 } 4987 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 4988 .addReg(Mask2Reg).addReg(ShiftReg); 4989 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 4990 .addReg(NewVal2Reg).addReg(MaskReg); 4991 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 4992 .addReg(OldVal2Reg).addReg(MaskReg); 4993 4994 BB = loop1MBB; 4995 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 4996 .addReg(PPC::R0).addReg(PtrReg); 4997 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 4998 .addReg(TmpDestReg).addReg(MaskReg); 4999 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 5000 .addReg(TmpReg).addReg(OldVal3Reg); 5001 BuildMI(BB, dl, TII->get(PPC::BCC)) 5002 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 5003 BB->addSuccessor(loop2MBB); 5004 BB->addSuccessor(midMBB); 5005 5006 BB = loop2MBB; 5007 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 5008 .addReg(TmpDestReg).addReg(MaskReg); 5009 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 5010 .addReg(Tmp2Reg).addReg(NewVal3Reg); 5011 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 5012 .addReg(PPC::R0).addReg(PtrReg); 5013 BuildMI(BB, dl, TII->get(PPC::BCC)) 5014 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 5015 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 5016 BB->addSuccessor(loop1MBB); 5017 BB->addSuccessor(exitMBB); 5018 5019 BB = midMBB; 5020 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 5021 .addReg(PPC::R0).addReg(PtrReg); 5022 BB->addSuccessor(exitMBB); 5023 5024 // exitMBB: 5025 // ... 5026 BB = exitMBB; 5027 BuildMI(BB, dl, TII->get(PPC::SRW),dest).addReg(TmpReg).addReg(ShiftReg); 5028 } else { 5029 llvm_unreachable("Unexpected instr type to insert"); 5030 } 5031 5032 F->DeleteMachineInstr(MI); // The pseudo instruction is gone now. 5033 return BB; 5034} 5035 5036//===----------------------------------------------------------------------===// 5037// Target Optimization Hooks 5038//===----------------------------------------------------------------------===// 5039 5040SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, 5041 DAGCombinerInfo &DCI) const { 5042 const TargetMachine &TM = getTargetMachine(); 5043 SelectionDAG &DAG = DCI.DAG; 5044 DebugLoc dl = N->getDebugLoc(); 5045 switch (N->getOpcode()) { 5046 default: break; 5047 case PPCISD::SHL: 5048 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 5049 if (C->getZExtValue() == 0) // 0 << V -> 0. 
5050 return N->getOperand(0); 5051 } 5052 break; 5053 case PPCISD::SRL: 5054 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 5055 if (C->getZExtValue() == 0) // 0 >>u V -> 0. 5056 return N->getOperand(0); 5057 } 5058 break; 5059 case PPCISD::SRA: 5060 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) { 5061 if (C->getZExtValue() == 0 || // 0 >>s V -> 0. 5062 C->isAllOnesValue()) // -1 >>s V -> -1. 5063 return N->getOperand(0); 5064 } 5065 break; 5066 5067 case ISD::SINT_TO_FP: 5068 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 5069 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 5070 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 5071 // We allow the src/dst to be either f32/f64, but the intermediate 5072 // type must be i64. 5073 if (N->getOperand(0).getValueType() == MVT::i64 && 5074 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 5075 SDValue Val = N->getOperand(0).getOperand(0); 5076 if (Val.getValueType() == MVT::f32) { 5077 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 5078 DCI.AddToWorklist(Val.getNode()); 5079 } 5080 5081 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val); 5082 DCI.AddToWorklist(Val.getNode()); 5083 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val); 5084 DCI.AddToWorklist(Val.getNode()); 5085 if (N->getValueType(0) == MVT::f32) { 5086 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 5087 DAG.getIntPtrConstant(0)); 5088 DCI.AddToWorklist(Val.getNode()); 5089 } 5090 return Val; 5091 } else if (N->getOperand(0).getValueType() == MVT::i32) { 5092 // If the intermediate type is i32, we can avoid the load/store here 5093 // too. 5094 } 5095 } 5096 } 5097 break; 5098 case ISD::STORE: 5099 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)). 5100 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() && 5101 !cast<StoreSDNode>(N)->isTruncatingStore() && 5102 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && 5103 N->getOperand(1).getValueType() == MVT::i32 && 5104 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { 5105 SDValue Val = N->getOperand(1).getOperand(0); 5106 if (Val.getValueType() == MVT::f32) { 5107 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 5108 DCI.AddToWorklist(Val.getNode()); 5109 } 5110 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val); 5111 DCI.AddToWorklist(Val.getNode()); 5112 5113 Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val, 5114 N->getOperand(2), N->getOperand(3)); 5115 DCI.AddToWorklist(Val.getNode()); 5116 return Val; 5117 } 5118 5119 // Turn STORE (BSWAP) -> sthbrx/stwbrx. 5120 if (cast<StoreSDNode>(N)->isUnindexed() && 5121 N->getOperand(1).getOpcode() == ISD::BSWAP && 5122 N->getOperand(1).getNode()->hasOneUse() && 5123 (N->getOperand(1).getValueType() == MVT::i32 || 5124 N->getOperand(1).getValueType() == MVT::i16)) { 5125 SDValue BSwapOp = N->getOperand(1).getOperand(0); 5126 // Do an any-extend to 32-bits if this is a half-word input. 5127 if (BSwapOp.getValueType() == MVT::i16) 5128 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp); 5129 5130 SDValue Ops[] = { 5131 N->getOperand(0), BSwapOp, N->getOperand(2), 5132 DAG.getValueType(N->getOperand(1).getValueType()) 5133 }; 5134 return 5135 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other), 5136 Ops, array_lengthof(Ops), 5137 cast<StoreSDNode>(N)->getMemoryVT(), 5138 cast<StoreSDNode>(N)->getMemOperand()); 5139 } 5140 break; 5141 case ISD::BSWAP: 5142 // Turn BSWAP (LOAD) -> lhbrx/lwbrx. 
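    // For example, (i32 (bswap (i32 load p))) is rewritten into a single
    // byte-reversed load (PPCISD::LBRX) from p, so no separate byte-swap
    // code needs to be emitted.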
5143 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 5144 N->getOperand(0).hasOneUse() && 5145 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { 5146 SDValue Load = N->getOperand(0); 5147 LoadSDNode *LD = cast<LoadSDNode>(Load); 5148 // Create the byte-swapping load. 5149 SDValue Ops[] = { 5150 LD->getChain(), // Chain 5151 LD->getBasePtr(), // Ptr 5152 DAG.getValueType(N->getValueType(0)) // VT 5153 }; 5154 SDValue BSLoad = 5155 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl, 5156 DAG.getVTList(MVT::i32, MVT::Other), Ops, 3, 5157 LD->getMemoryVT(), LD->getMemOperand()); 5158 5159 // If this is an i16 load, insert the truncate. 5160 SDValue ResVal = BSLoad; 5161 if (N->getValueType(0) == MVT::i16) 5162 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad); 5163 5164 // First, combine the bswap away. This makes the value produced by the 5165 // load dead. 5166 DCI.CombineTo(N, ResVal); 5167 5168 // Next, combine the load away, we give it a bogus result value but a real 5169 // chain result. The result value is dead because the bswap is dead. 5170 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 5171 5172 // Return N so it doesn't get rechecked! 5173 return SDValue(N, 0); 5174 } 5175 5176 break; 5177 case PPCISD::VCMP: { 5178 // If a VCMPo node already exists with exactly the same operands as this 5179 // node, use its result instead of this node (VCMPo computes both a CR6 and 5180 // a normal output). 5181 // 5182 if (!N->getOperand(0).hasOneUse() && 5183 !N->getOperand(1).hasOneUse() && 5184 !N->getOperand(2).hasOneUse()) { 5185 5186 // Scan all of the users of the LHS, looking for VCMPo's that match. 5187 SDNode *VCMPoNode = 0; 5188 5189 SDNode *LHSN = N->getOperand(0).getNode(); 5190 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 5191 UI != E; ++UI) 5192 if (UI->getOpcode() == PPCISD::VCMPo && 5193 UI->getOperand(1) == N->getOperand(1) && 5194 UI->getOperand(2) == N->getOperand(2) && 5195 UI->getOperand(0) == N->getOperand(0)) { 5196 VCMPoNode = *UI; 5197 break; 5198 } 5199 5200 // If there is no VCMPo node, or if the flag value has a single use, don't 5201 // transform this. 5202 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 5203 break; 5204 5205 // Look at the (necessarily single) use of the flag value. If it has a 5206 // chain, this transformation is more complex. Note that multiple things 5207 // could use the value result, which we should ignore. 5208 SDNode *FlagUser = 0; 5209 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 5210 FlagUser == 0; ++UI) { 5211 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 5212 SDNode *User = *UI; 5213 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 5214 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 5215 FlagUser = User; 5216 break; 5217 } 5218 } 5219 } 5220 5221 // If the user is a MFCR instruction, we know this is safe. Otherwise we 5222 // give up for right now. 5223 if (FlagUser->getOpcode() == PPCISD::MFCR) 5224 return SDValue(VCMPoNode, 0); 5225 } 5226 break; 5227 } 5228 case ISD::BR_CC: { 5229 // If this is a branch on an altivec predicate comparison, lower this so 5230 // that we don't have to do a MFCR: instead, branch directly on CR6. This 5231 // lowering is done pre-legalize, because the legalizer lowers the predicate 5232 // compare down to code that is difficult to reassemble. 
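    // For example, a br_cc of (setne (i32 result of a vcmp*_p predicate
    // intrinsic), 0) becomes a VCMPo node followed by a COND_BRANCH on the
    // appropriate CR6 bit, instead of reading CR6 with mfcr and branching on
    // the extracted value.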
5233 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 5234 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 5235 int CompareOpc; 5236 bool isDot; 5237 5238 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 5239 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 5240 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 5241 assert(isDot && "Can't compare against a vector result!"); 5242 5243 // If this is a comparison against something other than 0/1, then we know 5244 // that the condition is never/always true. 5245 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 5246 if (Val != 0 && Val != 1) { 5247 if (CC == ISD::SETEQ) // Cond never true, remove branch. 5248 return N->getOperand(0); 5249 // Always !=, turn it into an unconditional branch. 5250 return DAG.getNode(ISD::BR, dl, MVT::Other, 5251 N->getOperand(0), N->getOperand(4)); 5252 } 5253 5254 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 5255 5256 // Create the PPCISD altivec 'dot' comparison node. 5257 std::vector<EVT> VTs; 5258 SDValue Ops[] = { 5259 LHS.getOperand(2), // LHS of compare 5260 LHS.getOperand(3), // RHS of compare 5261 DAG.getConstant(CompareOpc, MVT::i32) 5262 }; 5263 VTs.push_back(LHS.getOperand(2).getValueType()); 5264 VTs.push_back(MVT::Flag); 5265 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 5266 5267 // Unpack the result based on how the target uses it. 5268 PPC::Predicate CompOpc; 5269 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 5270 default: // Can't happen, don't crash on invalid number though. 5271 case 0: // Branch on the value of the EQ bit of CR6. 5272 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 5273 break; 5274 case 1: // Branch on the inverted value of the EQ bit of CR6. 5275 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 5276 break; 5277 case 2: // Branch on the value of the LT bit of CR6. 5278 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 5279 break; 5280 case 3: // Branch on the inverted value of the LT bit of CR6. 5281 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 5282 break; 5283 } 5284 5285 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 5286 DAG.getConstant(CompOpc, MVT::i32), 5287 DAG.getRegister(PPC::CR6, MVT::i32), 5288 N->getOperand(4), CompNode.getValue(1)); 5289 } 5290 break; 5291 } 5292 } 5293 5294 return SDValue(); 5295} 5296 5297//===----------------------------------------------------------------------===// 5298// Inline Assembly Support 5299//===----------------------------------------------------------------------===// 5300 5301void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 5302 const APInt &Mask, 5303 APInt &KnownZero, 5304 APInt &KnownOne, 5305 const SelectionDAG &DAG, 5306 unsigned Depth) const { 5307 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 5308 switch (Op.getOpcode()) { 5309 default: break; 5310 case PPCISD::LBRX: { 5311 // lhbrx is known to have the top bits cleared out. 
5312 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16) 5313 KnownZero = 0xFFFF0000; 5314 break; 5315 } 5316 case ISD::INTRINSIC_WO_CHAIN: { 5317 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) { 5318 default: break; 5319 case Intrinsic::ppc_altivec_vcmpbfp_p: 5320 case Intrinsic::ppc_altivec_vcmpeqfp_p: 5321 case Intrinsic::ppc_altivec_vcmpequb_p: 5322 case Intrinsic::ppc_altivec_vcmpequh_p: 5323 case Intrinsic::ppc_altivec_vcmpequw_p: 5324 case Intrinsic::ppc_altivec_vcmpgefp_p: 5325 case Intrinsic::ppc_altivec_vcmpgtfp_p: 5326 case Intrinsic::ppc_altivec_vcmpgtsb_p: 5327 case Intrinsic::ppc_altivec_vcmpgtsh_p: 5328 case Intrinsic::ppc_altivec_vcmpgtsw_p: 5329 case Intrinsic::ppc_altivec_vcmpgtub_p: 5330 case Intrinsic::ppc_altivec_vcmpgtuh_p: 5331 case Intrinsic::ppc_altivec_vcmpgtuw_p: 5332 KnownZero = ~1U; // All bits but the low one are known to be zero. 5333 break; 5334 } 5335 } 5336 } 5337} 5338 5339 5340/// getConstraintType - Given a constraint, return the type of 5341/// constraint it is for this target. 5342PPCTargetLowering::ConstraintType 5343PPCTargetLowering::getConstraintType(const std::string &Constraint) const { 5344 if (Constraint.size() == 1) { 5345 switch (Constraint[0]) { 5346 default: break; 5347 case 'b': 5348 case 'r': 5349 case 'f': 5350 case 'v': 5351 case 'y': 5352 return C_RegisterClass; 5353 } 5354 } 5355 return TargetLowering::getConstraintType(Constraint); 5356} 5357 5358std::pair<unsigned, const TargetRegisterClass*> 5359PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 5360 EVT VT) const { 5361 if (Constraint.size() == 1) { 5362 // GCC RS6000 Constraint Letters 5363 switch (Constraint[0]) { 5364 case 'b': // R1-R31 5365 case 'r': // R0-R31 5366 if (VT == MVT::i64 && PPCSubTarget.isPPC64()) 5367 return std::make_pair(0U, PPC::G8RCRegisterClass); 5368 return std::make_pair(0U, PPC::GPRCRegisterClass); 5369 case 'f': 5370 if (VT == MVT::f32) 5371 return std::make_pair(0U, PPC::F4RCRegisterClass); 5372 else if (VT == MVT::f64) 5373 return std::make_pair(0U, PPC::F8RCRegisterClass); 5374 break; 5375 case 'v': 5376 return std::make_pair(0U, PPC::VRRCRegisterClass); 5377 case 'y': // crrc 5378 return std::make_pair(0U, PPC::CRRCRegisterClass); 5379 } 5380 } 5381 5382 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 5383} 5384 5385 5386/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 5387/// vector. If it is invalid, don't add anything to Ops. If hasMemory is true 5388/// it means one of the asm constraint of the inline asm instruction being 5389/// processed is 'm'. 5390void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter, 5391 bool hasMemory, 5392 std::vector<SDValue>&Ops, 5393 SelectionDAG &DAG) const { 5394 SDValue Result(0,0); 5395 switch (Letter) { 5396 default: break; 5397 case 'I': 5398 case 'J': 5399 case 'K': 5400 case 'L': 5401 case 'M': 5402 case 'N': 5403 case 'O': 5404 case 'P': { 5405 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op); 5406 if (!CST) return; // Must be an immediate to match. 5407 unsigned Value = CST->getZExtValue(); 5408 switch (Letter) { 5409 default: llvm_unreachable("Unknown constraint letter!"); 5410 case 'I': // "I" is a signed 16-bit constant. 5411 if ((short)Value == (int)Value) 5412 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5413 break; 5414 case 'J': // "J" is a constant with only the high-order 16 bits nonzero. 5415 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits. 
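      // Both 'J' and 'L' require the low-order 16 bits of the value to be
      // zero, which is exactly what the (short)Value == 0 test below checks.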
5416 if ((short)Value == 0) 5417 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5418 break; 5419 case 'K': // "K" is a constant with only the low-order 16 bits nonzero. 5420 if ((Value >> 16) == 0) 5421 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5422 break; 5423 case 'M': // "M" is a constant that is greater than 31. 5424 if (Value > 31) 5425 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5426 break; 5427 case 'N': // "N" is a positive constant that is an exact power of two. 5428 if ((int)Value > 0 && isPowerOf2_32(Value)) 5429 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5430 break; 5431 case 'O': // "O" is the constant zero. 5432 if (Value == 0) 5433 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5434 break; 5435 case 'P': // "P" is a constant whose negation is a signed 16-bit constant. 5436 if ((short)-Value == (int)-Value) 5437 Result = DAG.getTargetConstant(Value, Op.getValueType()); 5438 break; 5439 } 5440 break; 5441 } 5442 } 5443 5444 if (Result.getNode()) { 5445 Ops.push_back(Result); 5446 return; 5447 } 5448 5449 // Handle standard constraint letters. 5450 TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG); 5451} 5452 5453// isLegalAddressingMode - Return true if the addressing mode represented 5454// by AM is legal for this target, for a load/store of the specified type. 5455bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM, 5456 const Type *Ty) const { 5457 // FIXME: PPC does not allow r+i addressing modes for vectors! 5458 5459 // PPC allows a sign-extended 16-bit immediate field. 5460 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 5461 return false; 5462 5463 // No global is ever allowed as a base. 5464 if (AM.BaseGV) 5465 return false; 5466 5467 // PPC only support r+r, 5468 switch (AM.Scale) { 5469 case 0: // "r+i" or just "i", depending on HasBaseReg. 5470 break; 5471 case 1: 5472 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 5473 return false; 5474 // Otherwise we have r+r or r+i. 5475 break; 5476 case 2: 5477 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 5478 return false; 5479 // Allow 2*r as r+r. 5480 break; 5481 default: 5482 // No other scales are supported. 5483 return false; 5484 } 5485 5486 return true; 5487} 5488 5489/// isLegalAddressImmediate - Return true if the integer value can be used 5490/// as the offset of the target addressing mode for load / store of the 5491/// given type. 5492bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,const Type *Ty) const{ 5493 // PPC allows a sign-extended 16-bit immediate field. 5494 return (V > -(1 << 16) && V < (1 << 16)-1); 5495} 5496 5497bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const { 5498 return false; 5499} 5500 5501SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 5502 SelectionDAG &DAG) const { 5503 DebugLoc dl = Op.getDebugLoc(); 5504 // Depths > 0 not supported yet! 5505 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) 5506 return SDValue(); 5507 5508 MachineFunction &MF = DAG.getMachineFunction(); 5509 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 5510 5511 // Just load the return address off the stack. 5512 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 5513 5514 // Make sure the function really does not optimize away the store of the RA 5515 // to the stack. 
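  // Without this, a function that never otherwise clobbers LR might not
  // store it, and the load of the return address below would read an
  // undefined stack slot.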
5516 FuncInfo->setLRStoreRequired(); 5517 return DAG.getLoad(getPointerTy(), dl, 5518 DAG.getEntryNode(), RetAddrFI, NULL, 0, 5519 false, false, 0); 5520} 5521 5522SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 5523 SelectionDAG &DAG) const { 5524 DebugLoc dl = Op.getDebugLoc(); 5525 // Depths > 0 not supported yet! 5526 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0) 5527 return SDValue(); 5528 5529 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 5530 bool isPPC64 = PtrVT == MVT::i64; 5531 5532 MachineFunction &MF = DAG.getMachineFunction(); 5533 MachineFrameInfo *MFI = MF.getFrameInfo(); 5534 bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects()) 5535 && MFI->getStackSize(); 5536 5537 if (isPPC64) 5538 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::X31 : PPC::X1, 5539 MVT::i64); 5540 else 5541 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::R31 : PPC::R1, 5542 MVT::i32); 5543} 5544 5545bool 5546PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 5547 // The PowerPC target isn't yet aware of offsets. 5548 return false; 5549} 5550 5551/// getOptimalMemOpType - Returns the target specific optimal type for load 5552/// and store operations as a result of memset, memcpy, and memmove 5553/// lowering. If DstAlign is zero that means it's safe to destination 5554/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 5555/// means there isn't a need to check it against alignment requirement, 5556/// probably because the source does not need to be loaded. If 5557/// 'NonScalarIntSafe' is true, that means it's safe to return a 5558/// non-scalar-integer type, e.g. empty string source, constant, or loaded 5559/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is 5560/// constant so it does not need to be loaded. 5561/// It returns EVT::Other if the type should be determined using generic 5562/// target-independent logic. 5563EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, 5564 unsigned DstAlign, unsigned SrcAlign, 5565 bool NonScalarIntSafe, 5566 bool MemcpyStrSrc, 5567 MachineFunction &MF) const { 5568 if (this->PPCSubTarget.isPPC64()) { 5569 return MVT::i64; 5570 } else { 5571 return MVT::i32; 5572 } 5573} 5574