PPCISelLowering.cpp revision ad3b34d1bc4eaa92a95c56fe32fd18a6f36f62f4
//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
  cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
  cl::desc("disable setting the node scheduling preference to ILP on PPC"),
  cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
  cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
  if (TM.getSubtargetImpl()->isDarwin())
    return new TargetLoweringObjectFileMachO();

  if (TM.getSubtargetImpl()->isSVR4ABI())
    return new PPC64LinuxTargetObjectFile();

  return new TargetLoweringObjectFileELF();
}

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
  const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget->isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 SEXTLOAD but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-increment loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
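  // For example: with PRE_INC legal, a loop pattern like
  //   p += 4; x = *p;
  // can select to a single update-form load such as lwzu, which also writes
  // the incremented address back into the base register.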

  // This is used in the ppcf128->int sequence. Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget->hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget->hasFRSQRTE() && Subtarget->hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget->hasFSQRT() &&
      !(TM.Options.UnsafeFPMath &&
        Subtarget->hasFRSQRTES() && Subtarget->hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
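  // A sketch of the fast-math intent here: when FSQRT stays legal only
  // because of FRSQRTE/FRE (or their single-precision forms), the FDIV and
  // FSQRT DAG combines registered later in this constructor can rewrite the
  // operation as a hardware estimate refined by Newton-Raphson iteration
  // rather than a full-precision square root.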

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  if (Subtarget->hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

    // frin does not implement "ties to even." Thus, this is safe only in
    // fast-math mode.
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);

      // These need to set FE_INEXACT, and use a custom inserter.
      setOperationAction(ISD::FRINT, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);
    }
  }

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  if (Subtarget->hasPOPCNTD()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  // PowerPC does not have SELECT.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
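  // For instance: expanding SIGN_EXTEND_INREG on i1 produces a shift pair;
  // an i1 value held in an i32 register becomes roughly
  //   (sra (shl x, 31), 31)
  // which selects to slwi/srawi.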

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget->isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  // Comparisons that require checking two conditions.
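  // For example: with SETUEQ marked Expand, the legalizer rewrites
  // "setcc uoeq a, b" as two supported checks joined together, conceptually
  // (setcc oeq a, b) | (setcc uo a, b).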
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget->has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (PPCSubTarget.hasLFIWAX() || Subtarget->isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (PPCSubTarget.hasFPCVT()) {
    if (Subtarget->has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget->use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget->hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FPOWI, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
           j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
        MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j;
        setTruncStoreAction(VT, InnerVT, Expand);
      }
      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
    }
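
    // What the promotions above buy (a sketch): an operation like a v8i16
    // AND is rewritten by the legalizer as
    //   (v8i16 (bitcast (and (v4i32 (bitcast a)), (v4i32 (bitcast b)))))
    // so a single v4i32 pattern covers every integer vector type.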

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETULT, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand);
  }

  if (Subtarget->has64BitSupport()) {
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  }

  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);
  // Altivec instructions set fields to all zeros or all ones.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
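  // For example: vcmpequw writes 0xFFFFFFFF into every lane that compares
  // equal and 0 into every lane that doesn't, which is exactly the
  // ZeroOrNegativeOneBooleanContent contract declared above.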

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget->isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  setMinFunctionAlignment(2);
  if (PPCSubTarget.isDarwin())
    setPrefFunctionAlignment(4);

  if (isPPC64 && Subtarget->isJITCodeModel())
    // Temporary workaround for the inability of PPC64 JIT to handle jump
    // tables.
    setSupportJumpTables(false);

  setInsertFencesForAtomic(true);

  setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties();

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget->getDarwinDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
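    // In effect, a 128-byte memcpy (32 word stores) is still expanded inline
    // on these cores rather than becoming a call to memcpy.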

    setPrefFunctionAlignment(4);
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  const TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on a 4-byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  if (VectorType *VTy = dyn_cast<VectorType>(Ty))
    if (VTy->getBitWidth() >= 128)
      return 16;

  // Everything else is aligned to 8 bytes on PPC64 and 4 bytes on PPC32.
  if (PPCSubTarget.isPPC64())
    return 8;

  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::TOC_RESTORE: return "PPCISD::TOC_RESTORE";
  case PPCISD::LOAD: return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC: return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LARX: return "PPCISD::LARX";
  case PPCISD::STCX: return "PPCISD::STCX";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::ADDIS_TOC_HA: return "PPCISD::ADDIS_TOC_HA";
  case PPCISD::LD_TOC_L: return "PPCISD::LD_TOC_L";
  case PPCISD::ADDI_TOC_L: return "PPCISD::ADDI_TOC_L";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  }
}

EVT PPCTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}
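
// Illustrative behavior of getSetCCResultType: a scalar f64 compare yields
// an i32 result, while a v4f32 compare yields v4i32, matching the lane masks
// that Altivec comparisons produce.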

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)   // Step over units.
    for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit.
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}
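
// For concreteness: with UnitSize == 1 and isUnary == false this matches a
// vmrglb, which interleaves the low halves of the two inputs, i.e. the byte
// mask <8,24, 9,25, 10,26, 11,27, 12,28, 13,29, 14,30, 15,31>.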

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}


/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  }
  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}
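
// For example: a v4i32 splat of element 1 appears here as the v16i8 mask
// <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>; isSplatShuffleMask accepts it with
// EltSize == 4, and getVSPLTImmediate (below) then returns 1 for vspltw.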

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;  // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)  // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.
  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case we could fit the replicated bits into the
  // immediate field would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal. Continue doing this until we
  // get to ByteSize. This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}
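
// For example: a Constant<-4> node passes (0xFFFFFFFC sign-extends from the
// 16-bit value 0xFFFC), while Constant<36864> (0x9000) fails, since
// (short)0x9000 is -28672, not 36864.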

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;  // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;  // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;  // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If Aligned is true, only accept displacements
/// suitable for STD and friends, i.e. multiples of 4.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            bool Aligned) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(imm, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!Aligned || (imm & 3) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(imm, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
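    // Worked example: for Addr = 0x12348000 the low part is (short)0x8000 =
    // -32768, so the high part is (0x12348000 + 32768) >> 16 = 0x1235; then
    // lis 0x1235 plus displacement -32768 adds back to 0x12348000.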
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// getPreIndexedAddressParts - Returns true, and sets the base pointer,
/// offset pointer, and addressing mode by reference, if this node's address
/// can be legally represented as a pre-indexed load/store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {

    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// GetLabelAccessInfo - Return true if we should reference labels using a
/// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
                               unsigned &LoOpFlags, const GlobalValue *GV = 0) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if we are not in the PIC relocation model or if
  // we are on a non-Darwin platform; we don't support PIC on other platforms
  // yet.
  bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
               TM.getSubtarget<PPCSubtarget>().isDarwin();
  if (isPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }

  return isPIC;
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, PtrVT);
  SDLoc DL(HiPart);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}
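
// In instruction terms this is the classic pair (illustrative):
//   lis  r, G@ha
//   addi r, r, G@l
// where the "ha" form pre-compensates for the sign extension of the low
// 16-bit displacement.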

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(CP), MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue CPIHi =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
    DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI code is always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(JT), MVT::i64, GA,
                       DAG.getRegister(PPC::X2, MVT::i64));
  }

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  unsigned MOHiFlag, MOLoFlag;
  bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
}
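
// A quick map of the TLS cases handled below (descriptive sketch):
//   local-exec:     addis/addi against the thread pointer (X13 on ppc64,
//                   R2 on ppc32) with TPREL relocations;
//   initial-exec:   load the TP offset from the GOT, then ADD_TLS;
//   general/local-dynamic: materialize a GOT entry and reach __tls_get_addr
//                   through GET_TLS_ADDR / GET_TLSLD_ADDR.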
SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy();
  bool is64bit = PPCSubTarget.isPPC64();

  TLSModel::Model Model = getTargetMachine().getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
                                     is64bit ? MVT::i64 : MVT::i32);
    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

  if (!is64bit)
    llvm_unreachable("only local-exec is currently supported for ppc32");

  if (Model == TLSModel::InitialExec) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                PPCII::MO_TLS);
    SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
    SDValue TPOffsetHi = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
                                     PtrVT, GOTReg, TGA);
    SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
                                   PtrVT, TGA, TPOffsetHi);
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

  if (Model == TLSModel::GeneralDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
    SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                                     GOTReg, TGA);
    SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT,
                                   GOTEntryHi, TGA);

    // We need a chain node, and don't have one handy. The underlying
    // call has no side effects, so using the function entry node
    // suffices.
    SDValue Chain = DAG.getEntryNode();
    Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry);
    SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64);
    SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLS_ADDR, dl,
                                  PtrVT, ParmReg, TGA);
    // The return value from GET_TLS_ADDR really is in X3 already, but
    // some hacks are needed here to tie everything together. The extra
    // copies dissolve during subsequent transforms.
    Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr);
    return DAG.getCopyFromReg(Chain, dl, PPC::X3, PtrVT);
  }

  if (Model == TLSModel::LocalDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
    SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                                     GOTReg, TGA);
    SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT,
                                   GOTEntryHi, TGA);

    // We need a chain node, and don't have one handy. The underlying
    // call has no side effects, so using the function entry node
    // suffices.
    SDValue Chain = DAG.getEntryNode();
    Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry);
    SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64);
    SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLSLD_ADDR, dl,
                                  PtrVT, ParmReg, TGA);
    // The return value from GET_TLSLD_ADDR really is in X3 already, but
    // some hacks are needed here to tie everything together. The extra
    // copies dissolve during subsequent transforms.
    Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT,
                                      Chain, ParmReg, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }

  llvm_unreachable("Unknown TLS model!");
}
1432 // The actual address of the GlobalValue is stored in the TOC. 1433 if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { 1434 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 1435 return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA, 1436 DAG.getRegister(PPC::X2, MVT::i64)); 1437 } 1438 1439 unsigned MOHiFlag, MOLoFlag; 1440 bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV); 1441 1442 SDValue GAHi = 1443 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 1444 SDValue GALo = 1445 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 1446 1447 SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG); 1448 1449 // If the global reference is actually to a non-lazy-pointer, we have to do an 1450 // extra load to get the address of the global. 1451 if (MOHiFlag & PPCII::MO_NLP_FLAG) 1452 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(), 1453 false, false, false, 0); 1454 return Ptr; 1455} 1456 1457SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 1458 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1459 SDLoc dl(Op); 1460 1461 // If we're comparing for equality to zero, expose the fact that this is 1462 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 1463 // fold the new nodes. 1464 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1465 if (C->isNullValue() && CC == ISD::SETEQ) { 1466 EVT VT = Op.getOperand(0).getValueType(); 1467 SDValue Zext = Op.getOperand(0); 1468 if (VT.bitsLT(MVT::i32)) { 1469 VT = MVT::i32; 1470 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 1471 } 1472 unsigned Log2b = Log2_32(VT.getSizeInBits()); 1473 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 1474 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 1475 DAG.getConstant(Log2b, MVT::i32)); 1476 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 1477 } 1478 // Leave comparisons against 0 and -1 alone for now, since they're usually 1479 // optimized. FIXME: revisit this when we can custom lower all setcc 1480 // optimizations. 1481 if (C->isAllOnesValue() || C->isNullValue()) 1482 return SDValue(); 1483 } 1484 1485 // If we have an integer seteq/setne, turn it into a compare against zero 1486 // by xor'ing the rhs with the lhs, which is faster than setting a 1487 // condition register, reading it back out, and masking the correct bit. The 1488 // normal approach here uses sub to do this instead of xor. Using xor exposes 1489 // the result to other bit-twiddling opportunities. 
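 // As a concrete sketch of why this is profitable (hypothetical registers;
 // the exact sequence is up to later combines and instruction selection),
 // a 32-bit (a == b) lowered through the xor + ctlz/srl path can come out as:
 //   xor    r3, r3, r4    ; zero iff a == b
 //   cntlzw r3, r3        ; yields 32 iff the xor result was zero
 //   srwi   r3, r3, 5     ; 32 >> 5 == 1, any smaller count gives 0
 // rather than a cmpw/mfcr/rlwinm dance through the condition register.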
1490 EVT LHSVT = Op.getOperand(0).getValueType(); 1491 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 1492 EVT VT = Op.getValueType(); 1493 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 1494 Op.getOperand(1)); 1495 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC); 1496 } 1497 return SDValue(); 1498} 1499 1500SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 1501 const PPCSubtarget &Subtarget) const { 1502 SDNode *Node = Op.getNode(); 1503 EVT VT = Node->getValueType(0); 1504 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1505 SDValue InChain = Node->getOperand(0); 1506 SDValue VAListPtr = Node->getOperand(1); 1507 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 1508 SDLoc dl(Node); 1509 1510 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 1511 1512 // gpr_index 1513 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1514 VAListPtr, MachinePointerInfo(SV), MVT::i8, 1515 false, false, 0); 1516 InChain = GprIndex.getValue(1); 1517 1518 if (VT == MVT::i64) { 1519 // Check if GprIndex is even 1520 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 1521 DAG.getConstant(1, MVT::i32)); 1522 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 1523 DAG.getConstant(0, MVT::i32), ISD::SETNE); 1524 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 1525 DAG.getConstant(1, MVT::i32)); 1526 // Align GprIndex to be even if it isn't 1527 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 1528 GprIndex); 1529 } 1530 1531 // fpr index is 1 byte after gpr 1532 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1533 DAG.getConstant(1, MVT::i32)); 1534 1535 // fpr 1536 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 1537 FprPtr, MachinePointerInfo(SV), MVT::i8, 1538 false, false, 0); 1539 InChain = FprIndex.getValue(1); 1540 1541 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1542 DAG.getConstant(8, MVT::i32)); 1543 1544 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 1545 DAG.getConstant(4, MVT::i32)); 1546 1547 // areas 1548 SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 1549 MachinePointerInfo(), false, false, 1550 false, 0); 1551 InChain = OverflowArea.getValue(1); 1552 1553 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 1554 MachinePointerInfo(), false, false, 1555 false, 0); 1556 InChain = RegSaveArea.getValue(1); 1557 1558 // select overflow_area if index >= 8 1559 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 1560 DAG.getConstant(8, MVT::i32), ISD::SETLT); 1561 1562 // adjustment constant gpr_index * 4/8 1563 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 1564 VT.isInteger() ? GprIndex : FprIndex, 1565 DAG.getConstant(VT.isInteger() ? 4 : 8, 1566 MVT::i32)); 1567 1568 // OurReg = RegSaveArea + RegConstant 1569 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 1570 RegConstant); 1571 1572 // Floating types are 32 bytes into RegSaveArea 1573 if (VT.isFloatingPoint()) 1574 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 1575 DAG.getConstant(32, MVT::i32)); 1576 1577 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 1578 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 1579 VT.isInteger() ? GprIndex : FprIndex, 1580 DAG.getConstant(VT == MVT::i64 ? 
2 : 1, 1581 MVT::i32)); 1582 1583 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 1584 VT.isInteger() ? VAListPtr : FprPtr, 1585 MachinePointerInfo(SV), 1586 MVT::i8, false, false, 0); 1587 1588 // determine if we should load from reg_save_area or overflow_area 1589 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 1590 1591 // increase overflow_area by 4/8 if gpr/fpr >= 8 1592 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 1593 DAG.getConstant(VT.isInteger() ? 4 : 8, 1594 MVT::i32)); 1595 1596 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 1597 OverflowAreaPlusN); 1598 1599 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 1600 OverflowAreaPtr, 1601 MachinePointerInfo(), 1602 MVT::i32, false, false, 0); 1603 1604 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 1605 false, false, false, 0); 1606} 1607 1608SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 1609 SelectionDAG &DAG) const { 1610 return Op.getOperand(0); 1611} 1612 1613SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 1614 SelectionDAG &DAG) const { 1615 SDValue Chain = Op.getOperand(0); 1616 SDValue Trmp = Op.getOperand(1); // trampoline 1617 SDValue FPtr = Op.getOperand(2); // nested function 1618 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 1619 SDLoc dl(Op); 1620 1621 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1622 bool isPPC64 = (PtrVT == MVT::i64); 1623 Type *IntPtrTy = 1624 DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType( 1625 *DAG.getContext()); 1626 1627 TargetLowering::ArgListTy Args; 1628 TargetLowering::ArgListEntry Entry; 1629 1630 Entry.Ty = IntPtrTy; 1631 Entry.Node = Trmp; Args.push_back(Entry); 1632 1633 // TrampSize == (isPPC64 ? 48 : 40); 1634 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, 1635 isPPC64 ? MVT::i64 : MVT::i32); 1636 Args.push_back(Entry); 1637 1638 Entry.Node = FPtr; Args.push_back(Entry); 1639 Entry.Node = Nest; Args.push_back(Entry); 1640 1641 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 1642 TargetLowering::CallLoweringInfo CLI(Chain, 1643 Type::getVoidTy(*DAG.getContext()), 1644 false, false, false, false, 0, 1645 CallingConv::C, 1646 /*isTailCall=*/false, 1647 /*doesNotRet=*/false, 1648 /*isReturnValueUsed=*/true, 1649 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 1650 Args, DAG, dl); 1651 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 1652 1653 return CallResult.second; 1654} 1655 1656SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 1657 const PPCSubtarget &Subtarget) const { 1658 MachineFunction &MF = DAG.getMachineFunction(); 1659 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1660 1661 SDLoc dl(Op); 1662 1663 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 1664 // vastart just stores the address of the VarArgsFrameIndex slot into the 1665 // memory location argument. 1666 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1667 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 1668 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1669 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 1670 MachinePointerInfo(SV), 1671 false, false, 0); 1672 } 1673 1674 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 1675 // We assume the given va_list is already allocated. 
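 // (On ILP32 the four fields below land at byte offsets 0, 1, 4 and 8; those
 // are exactly the hard-coded offsets used by LowerVAARG above and by the
 // stores that follow.)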
1676 // 1677 // typedef struct { 1678 // char gpr; /* index into the array of 8 GPRs 1679 // * stored in the register save area 1680 // * gpr=0 corresponds to r3, 1681 // * gpr=1 to r4, etc. 1682 // */ 1683 // char fpr; /* index into the array of 8 FPRs 1684 // * stored in the register save area 1685 // * fpr=0 corresponds to f1, 1686 // * fpr=1 to f2, etc. 1687 // */ 1688 // char *overflow_arg_area; 1689 // /* location on stack that holds 1690 // * the next overflow argument 1691 // */ 1692 // char *reg_save_area; 1693 // /* where r3:r10 and f1:f8 (if saved) 1694 // * are stored 1695 // */ 1696 // } va_list[1]; 1697 1698 1699 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32); 1700 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32); 1701 1702 1703 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1704 1705 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 1706 PtrVT); 1707 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 1708 PtrVT); 1709 1710 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 1711 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); 1712 1713 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 1714 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); 1715 1716 uint64_t FPROffset = 1; 1717 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); 1718 1719 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1720 1721 // Store first byte : number of int regs 1722 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 1723 Op.getOperand(1), 1724 MachinePointerInfo(SV), 1725 MVT::i8, false, false, 0); 1726 uint64_t nextOffset = FPROffset; 1727 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 1728 ConstFPROffset); 1729 1730 // Store second byte : number of float regs 1731 SDValue secondStore = 1732 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 1733 MachinePointerInfo(SV, nextOffset), MVT::i8, 1734 false, false, 0); 1735 nextOffset += StackOffset; 1736 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 1737 1738 // Store second word : arguments given on stack 1739 SDValue thirdStore = 1740 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 1741 MachinePointerInfo(SV, nextOffset), 1742 false, false, 0); 1743 nextOffset += FrameOffset; 1744 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 1745 1746 // Store third word : arguments given in registers 1747 return DAG.getStore(thirdStore, dl, FR, nextPtr, 1748 MachinePointerInfo(SV, nextOffset), 1749 false, false, 0); 1750 1751} 1752 1753#include "PPCGenCallingConv.inc" 1754 1755bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 1756 CCValAssign::LocInfo &LocInfo, 1757 ISD::ArgFlagsTy &ArgFlags, 1758 CCState &State) { 1759 return true; 1760} 1761 1762bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 1763 MVT &LocVT, 1764 CCValAssign::LocInfo &LocInfo, 1765 ISD::ArgFlagsTy &ArgFlags, 1766 CCState &State) { 1767 static const uint16_t ArgRegs[] = { 1768 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 1769 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 1770 }; 1771 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1772 1773 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1774 1775 // Skip one register if the first unallocated register has an even register 1776 // number and there are still argument registers available which have not been 1777 // allocated yet. 
RegNum is actually an index into ArgRegs, which means we 1778 // need to skip a register if RegNum is odd. 1779 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 1780 State.AllocateReg(ArgRegs[RegNum]); 1781 } 1782 1783 // Always return false here, as this function only makes sure that the first 1784 // unallocated register has an odd register number and does not actually 1785 // allocate a register for the current argument. 1786 return false; 1787} 1788 1789bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 1790 MVT &LocVT, 1791 CCValAssign::LocInfo &LocInfo, 1792 ISD::ArgFlagsTy &ArgFlags, 1793 CCState &State) { 1794 static const uint16_t ArgRegs[] = { 1795 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1796 PPC::F8 1797 }; 1798 1799 const unsigned NumArgRegs = array_lengthof(ArgRegs); 1800 1801 unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); 1802 1803 // If there is only one Floating-point register left we need to put both f64 1804 // values of a split ppc_fp128 value on the stack. 1805 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 1806 State.AllocateReg(ArgRegs[RegNum]); 1807 } 1808 1809 // Always return false here, as this function only makes sure that the two f64 1810 // values a ppc_fp128 value is split into are both passed in registers or both 1811 // passed on the stack and does not actually allocate a register for the 1812 // current argument. 1813 return false; 1814} 1815 1816/// GetFPR - Get the set of FP registers that should be allocated for arguments, 1817/// on Darwin. 1818static const uint16_t *GetFPR() { 1819 static const uint16_t FPR[] = { 1820 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 1821 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13 1822 }; 1823 1824 return FPR; 1825} 1826 1827/// CalculateStackSlotSize - Calculates the size reserved for this argument on 1828/// the stack. 
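/// For example, with an 8-byte pointer an 11-byte byval argument rounds up
/// to a 16-byte slot, while a plain i32 still reserves a full 8-byte slot.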
1829static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 1830 unsigned PtrByteSize) { 1831 unsigned ArgSize = ArgVT.getSizeInBits()/8; 1832 if (Flags.isByVal()) 1833 ArgSize = Flags.getByValSize(); 1834 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 1835 1836 return ArgSize; 1837} 1838 1839SDValue 1840PPCTargetLowering::LowerFormalArguments(SDValue Chain, 1841 CallingConv::ID CallConv, bool isVarArg, 1842 const SmallVectorImpl<ISD::InputArg> 1843 &Ins, 1844 SDLoc dl, SelectionDAG &DAG, 1845 SmallVectorImpl<SDValue> &InVals) 1846 const { 1847 if (PPCSubTarget.isSVR4ABI()) { 1848 if (PPCSubTarget.isPPC64()) 1849 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 1850 dl, DAG, InVals); 1851 else 1852 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 1853 dl, DAG, InVals); 1854 } else { 1855 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 1856 dl, DAG, InVals); 1857 } 1858} 1859 1860SDValue 1861PPCTargetLowering::LowerFormalArguments_32SVR4( 1862 SDValue Chain, 1863 CallingConv::ID CallConv, bool isVarArg, 1864 const SmallVectorImpl<ISD::InputArg> 1865 &Ins, 1866 SDLoc dl, SelectionDAG &DAG, 1867 SmallVectorImpl<SDValue> &InVals) const { 1868 1869 // 32-bit SVR4 ABI Stack Frame Layout: 1870 // +-----------------------------------+ 1871 // +--> | Back chain | 1872 // | +-----------------------------------+ 1873 // | | Floating-point register save area | 1874 // | +-----------------------------------+ 1875 // | | General register save area | 1876 // | +-----------------------------------+ 1877 // | | CR save word | 1878 // | +-----------------------------------+ 1879 // | | VRSAVE save word | 1880 // | +-----------------------------------+ 1881 // | | Alignment padding | 1882 // | +-----------------------------------+ 1883 // | | Vector register save area | 1884 // | +-----------------------------------+ 1885 // | | Local variable space | 1886 // | +-----------------------------------+ 1887 // | | Parameter list area | 1888 // | +-----------------------------------+ 1889 // | | LR save word | 1890 // | +-----------------------------------+ 1891 // SP--> +--- | Back chain | 1892 // +-----------------------------------+ 1893 // 1894 // Specifications: 1895 // System V Application Binary Interface PowerPC Processor Supplement 1896 // AltiVec Technology Programming Interface Manual 1897 1898 MachineFunction &MF = DAG.getMachineFunction(); 1899 MachineFrameInfo *MFI = MF.getFrameInfo(); 1900 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1901 1902 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 1903 // Potential tail calls could cause overwriting of argument stack slots. 1904 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 1905 (CallConv == CallingConv::Fast)); 1906 unsigned PtrByteSize = 4; 1907 1908 // Assign locations to all of the incoming arguments. 1909 SmallVector<CCValAssign, 16> ArgLocs; 1910 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1911 getTargetMachine(), ArgLocs, *DAG.getContext()); 1912 1913 // Reserve space for the linkage area on the stack. 1914 CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize); 1915 1916 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 1917 1918 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1919 CCValAssign &VA = ArgLocs[i]; 1920 1921 // Arguments stored in registers. 
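 // (Each such argument is rewritten below as a copy out of a fresh live-in
 // virtual register of the matching register class.)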
1922 if (VA.isRegLoc()) { 1923 const TargetRegisterClass *RC; 1924 EVT ValVT = VA.getValVT(); 1925 1926 switch (ValVT.getSimpleVT().SimpleTy) { 1927 default: 1928 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 1929 case MVT::i32: 1930 RC = &PPC::GPRCRegClass; 1931 break; 1932 case MVT::f32: 1933 RC = &PPC::F4RCRegClass; 1934 break; 1935 case MVT::f64: 1936 RC = &PPC::F8RCRegClass; 1937 break; 1938 case MVT::v16i8: 1939 case MVT::v8i16: 1940 case MVT::v4i32: 1941 case MVT::v4f32: 1942 RC = &PPC::VRRCRegClass; 1943 break; 1944 } 1945 1946 // Transform the arguments stored in physical registers into virtual ones. 1947 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1948 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT); 1949 1950 InVals.push_back(ArgValue); 1951 } else { 1952 // Argument stored in memory. 1953 assert(VA.isMemLoc()); 1954 1955 unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; 1956 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 1957 isImmutable); 1958 1959 // Create load nodes to retrieve arguments from the stack. 1960 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1961 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 1962 MachinePointerInfo(), 1963 false, false, false, 0)); 1964 } 1965 } 1966 1967 // Assign locations to all of the incoming aggregate by value arguments. 1968 // Aggregates passed by value are stored in the local variable space of the 1969 // caller's stack frame, right above the parameter list area. 1970 SmallVector<CCValAssign, 16> ByValArgLocs; 1971 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1972 getTargetMachine(), ByValArgLocs, *DAG.getContext()); 1973 1974 // Reserve stack space for the allocations in CCInfo. 1975 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 1976 1977 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 1978 1979 // Area that is at least reserved in the caller of this function. 1980 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 1981 1982 // Set the size that is at least reserved in caller of this function. Tail 1983 // call optimized function's reserved stack space needs to be aligned so that 1984 // taking the difference between two stack areas will result in an aligned 1985 // stack. 1986 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 1987 1988 MinReservedArea = 1989 std::max(MinReservedArea, 1990 PPCFrameLowering::getMinCallFrameSize(false, false)); 1991 1992 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()-> 1993 getStackAlignment(); 1994 unsigned AlignMask = TargetAlign-1; 1995 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 1996 1997 FI->setMinReservedArea(MinReservedArea); 1998 1999 SmallVector<SDValue, 8> MemOps; 2000 2001 // If the function takes variable number of arguments, make a frame index for 2002 // the start of the first vararg value... for expansion of llvm.va_start. 
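 // As a sketch, for a hypothetical "int sum(int n, ...)": n arrives in R3,
 // so VarArgsNumGPR becomes 1; the loops below then spill all of R3..R10 and
 // F1..F8 into the register save area (but see the CR-bit-6 FIXME), and
 // va_arg later walks those slots as ordinary memory.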
2003 if (isVarArg) { 2004 static const uint16_t GPArgRegs[] = { 2005 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2006 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2007 }; 2008 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 2009 2010 static const uint16_t FPArgRegs[] = { 2011 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2012 PPC::F8 2013 }; 2014 const unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 2015 2016 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs, 2017 NumGPArgRegs)); 2018 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs, 2019 NumFPArgRegs)); 2020 2021 // Make room for NumGPArgRegs and NumFPArgRegs. 2022 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 2023 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8; 2024 2025 FuncInfo->setVarArgsStackOffset( 2026 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2027 CCInfo.getNextStackOffset(), true)); 2028 2029 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 2030 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2031 2032 // The fixed integer arguments of a variadic function are stored to the 2033 // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing 2034 // the result of va_next. 2035 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 2036 // Get an existing live-in vreg, or add a new one. 2037 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 2038 if (!VReg) 2039 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 2040 2041 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2042 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2043 MachinePointerInfo(), false, false, 0); 2044 MemOps.push_back(Store); 2045 // Increment the address by four for the next argument to store 2046 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2047 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2048 } 2049 2050 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 2051 // is set. 2052 // The double arguments are stored to the VarArgsFrameIndex 2053 // on the stack. 2054 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 2055 // Get an existing live-in vreg, or add a new one. 2056 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 2057 if (!VReg) 2058 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 2059 2060 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 2061 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2062 MachinePointerInfo(), false, false, 0); 2063 MemOps.push_back(Store); 2064 // Increment the address by eight for the next argument to store 2065 SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8, 2066 PtrVT); 2067 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2068 } 2069 } 2070 2071 if (!MemOps.empty()) 2072 Chain = DAG.getNode(ISD::TokenFactor, dl, 2073 MVT::Other, &MemOps[0], MemOps.size()); 2074 2075 return Chain; 2076} 2077 2078// PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2079// value to MVT::i64 and then truncate to the correct register size. 
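// As an illustration, for a hypothetical "define i32 @f(i32 signext %x)",
// the value arrives in a 64-bit GPR and is wrapped as AssertSext(..., i32),
// so later combines may assume the upper 33 bits are all copies of the sign
// bit, before being truncated back down to i32 below.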
2080SDValue 2081PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 2082 SelectionDAG &DAG, SDValue ArgVal, 2083 SDLoc dl) const { 2084 if (Flags.isSExt()) 2085 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 2086 DAG.getValueType(ObjectVT)); 2087 else if (Flags.isZExt()) 2088 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 2089 DAG.getValueType(ObjectVT)); 2090 2091 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 2092} 2093 2094// Set the size that is at least reserved in caller of this function. Tail 2095// call optimized functions' reserved stack space needs to be aligned so that 2096// taking the difference between two stack areas will result in an aligned 2097// stack. 2098void 2099PPCTargetLowering::setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG, 2100 unsigned nAltivecParamsAtEnd, 2101 unsigned MinReservedArea, 2102 bool isPPC64) const { 2103 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 2104 // Add the Altivec parameters at the end, if needed. 2105 if (nAltivecParamsAtEnd) { 2106 MinReservedArea = ((MinReservedArea+15)/16)*16; 2107 MinReservedArea += 16*nAltivecParamsAtEnd; 2108 } 2109 MinReservedArea = 2110 std::max(MinReservedArea, 2111 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2112 unsigned TargetAlign 2113 = DAG.getMachineFunction().getTarget().getFrameLowering()-> 2114 getStackAlignment(); 2115 unsigned AlignMask = TargetAlign-1; 2116 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; 2117 FI->setMinReservedArea(MinReservedArea); 2118} 2119 2120SDValue 2121PPCTargetLowering::LowerFormalArguments_64SVR4( 2122 SDValue Chain, 2123 CallingConv::ID CallConv, bool isVarArg, 2124 const SmallVectorImpl<ISD::InputArg> 2125 &Ins, 2126 SDLoc dl, SelectionDAG &DAG, 2127 SmallVectorImpl<SDValue> &InVals) const { 2128 // TODO: add description of PPC stack frame format, or at least some docs. 2129 // 2130 MachineFunction &MF = DAG.getMachineFunction(); 2131 MachineFrameInfo *MFI = MF.getFrameInfo(); 2132 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2133 2134 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2135 // Potential tail calls could cause overwriting of argument stack slots. 2136 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2137 (CallConv == CallingConv::Fast)); 2138 unsigned PtrByteSize = 8; 2139 2140 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); 2141 // Area that is at least reserved in caller of this function. 2142 unsigned MinReservedArea = ArgOffset; 2143 2144 static const uint16_t GPR[] = { 2145 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2146 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2147 }; 2148 2149 static const uint16_t *FPR = GetFPR(); 2150 2151 static const uint16_t VR[] = { 2152 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2153 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2154 }; 2155 2156 const unsigned Num_GPR_Regs = array_lengthof(GPR); 2157 const unsigned Num_FPR_Regs = 13; 2158 const unsigned Num_VR_Regs = array_lengthof(VR); 2159 2160 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2161 2162 // Add DAG nodes to load the arguments or copy them out of registers. On 2163 // entry to a function on PPC, the arguments start after the linkage area, 2164 // although the first ones are often in registers. 
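 // As a rough sketch for a hypothetical f(int a, double b, <4 x float> c)
 // under 64-bit SVR4: a shadows X3 at parameter-area offset 48 (right past
 // the linkage area), b lives in F1, consumes X4, and shadows offset 56,
 // while c arrives in V2 and, being a non-varargs vector, reserves no
 // parameter-area slot in the loop below.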
2165 2166 SmallVector<SDValue, 8> MemOps; 2167 unsigned nAltivecParamsAtEnd = 0; 2168 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2169 unsigned CurArgIdx = 0; 2170 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 2171 SDValue ArgVal; 2172 bool needsLoad = false; 2173 EVT ObjectVT = Ins[ArgNo].VT; 2174 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 2175 unsigned ArgSize = ObjSize; 2176 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2177 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 2178 CurArgIdx = Ins[ArgNo].OrigArgIndex; 2179 2180 unsigned CurArgOffset = ArgOffset; 2181 2182 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 2183 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2184 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2185 if (isVarArg) { 2186 MinReservedArea = ((MinReservedArea+15)/16)*16; 2187 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2188 Flags, 2189 PtrByteSize); 2190 } else 2191 nAltivecParamsAtEnd++; 2192 } else 2193 // Calculate min reserved area. 2194 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2195 Flags, 2196 PtrByteSize); 2197 2198 // FIXME the codegen can be much improved in some cases. 2199 // We do not have to keep everything in memory. 2200 if (Flags.isByVal()) { 2201 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2202 ObjSize = Flags.getByValSize(); 2203 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2204 // Empty aggregate parameters do not take up registers. Examples: 2205 // struct { } a; 2206 // union { } b; 2207 // int c[0]; 2208 // etc. However, we have to provide a place-holder in InVals, so 2209 // pretend we have an 8-byte item at the current address for that 2210 // purpose. 2211 if (!ObjSize) { 2212 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2213 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2214 InVals.push_back(FIN); 2215 continue; 2216 } 2217 // All aggregates smaller than 8 bytes must be passed right-justified. 2218 if (ObjSize < PtrByteSize) 2219 CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize); 2220 // The value of the object is its address. 2221 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2222 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2223 InVals.push_back(FIN); 2224 2225 if (ObjSize < 8) { 2226 if (GPR_idx != Num_GPR_Regs) { 2227 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2228 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2229 SDValue Store; 2230 2231 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 2232 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 2233 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 2234 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2235 MachinePointerInfo(FuncArg, CurArgOffset), 2236 ObjType, false, false, 0); 2237 } else { 2238 // For sizes that don't fit a truncating store (3, 5, 6, 7), 2239 // store the whole register as-is to the parameter save area 2240 // slot. The address of the parameter was already calculated 2241 // above (InVals.push_back(FIN)) to be the right-justified 2242 // offset within the slot. For this store, we need a new 2243 // frame index that points at the beginning of the slot. 
2244 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2245 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2246 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2247 MachinePointerInfo(FuncArg, ArgOffset), 2248 false, false, 0); 2249 } 2250 2251 MemOps.push_back(Store); 2252 ++GPR_idx; 2253 } 2254 // Whether we copied from a register or not, advance the offset 2255 // into the parameter save area by a full doubleword. 2256 ArgOffset += PtrByteSize; 2257 continue; 2258 } 2259 2260 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2261 // Store whatever pieces of the object are in registers 2262 // to memory. ArgOffset will be the address of the beginning 2263 // of the object. 2264 if (GPR_idx != Num_GPR_Regs) { 2265 unsigned VReg; 2266 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2267 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2268 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2269 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2270 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2271 MachinePointerInfo(FuncArg, ArgOffset), 2272 false, false, 0); 2273 MemOps.push_back(Store); 2274 ++GPR_idx; 2275 ArgOffset += PtrByteSize; 2276 } else { 2277 ArgOffset += ArgSize - j; 2278 break; 2279 } 2280 } 2281 continue; 2282 } 2283 2284 switch (ObjectVT.getSimpleVT().SimpleTy) { 2285 default: llvm_unreachable("Unhandled argument type!"); 2286 case MVT::i32: 2287 case MVT::i64: 2288 if (GPR_idx != Num_GPR_Regs) { 2289 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2290 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2291 2292 if (ObjectVT == MVT::i32) 2293 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2294 // value to MVT::i64 and then truncate to the correct register size. 2295 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2296 2297 ++GPR_idx; 2298 } else { 2299 needsLoad = true; 2300 ArgSize = PtrByteSize; 2301 } 2302 ArgOffset += 8; 2303 break; 2304 2305 case MVT::f32: 2306 case MVT::f64: 2307 // Every 8 bytes of argument space consumes one of the GPRs available for 2308 // argument passing. 2309 if (GPR_idx != Num_GPR_Regs) { 2310 ++GPR_idx; 2311 } 2312 if (FPR_idx != Num_FPR_Regs) { 2313 unsigned VReg; 2314 2315 if (ObjectVT == MVT::f32) 2316 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2317 else 2318 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2319 2320 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2321 ++FPR_idx; 2322 } else { 2323 needsLoad = true; 2324 ArgSize = PtrByteSize; 2325 } 2326 2327 ArgOffset += 8; 2328 break; 2329 case MVT::v4f32: 2330 case MVT::v4i32: 2331 case MVT::v8i16: 2332 case MVT::v16i8: 2333 // Note that vector arguments in registers don't reserve stack space, 2334 // except in varargs functions. 2335 if (VR_idx != Num_VR_Regs) { 2336 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2337 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2338 if (isVarArg) { 2339 while ((ArgOffset % 16) != 0) { 2340 ArgOffset += PtrByteSize; 2341 if (GPR_idx != Num_GPR_Regs) 2342 GPR_idx++; 2343 } 2344 ArgOffset += 16; 2345 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2346 } 2347 ++VR_idx; 2348 } else { 2349 // Vectors are aligned. 
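 // Round ArgOffset up to the next 16-byte boundary before carving out the
 // in-memory slot.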
2350 ArgOffset = ((ArgOffset+15)/16)*16; 2351 CurArgOffset = ArgOffset; 2352 ArgOffset += 16; 2353 needsLoad = true; 2354 } 2355 break; 2356 } 2357 2358 // We need to load the argument to a virtual register if we determined 2359 // above that we ran out of physical registers of the appropriate type. 2360 if (needsLoad) { 2361 int FI = MFI->CreateFixedObject(ObjSize, 2362 CurArgOffset + (ArgSize - ObjSize), 2363 isImmutable); 2364 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2365 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2366 false, false, false, 0); 2367 } 2368 2369 InVals.push_back(ArgVal); 2370 } 2371 2372 // Set the size that is at least reserved in caller of this function. Tail 2373 // call optimized functions' reserved stack space needs to be aligned so that 2374 // taking the difference between two stack areas will result in an aligned 2375 // stack. 2376 setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true); 2377 2378 // If the function takes variable number of arguments, make a frame index for 2379 // the start of the first vararg value... for expansion of llvm.va_start. 2380 if (isVarArg) { 2381 int Depth = ArgOffset; 2382 2383 FuncInfo->setVarArgsFrameIndex( 2384 MFI->CreateFixedObject(PtrByteSize, Depth, true)); 2385 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2386 2387 // If this function is vararg, store any remaining integer argument regs 2388 // to their spots on the stack so that they may be loaded by dereferencing the 2389 // result of va_next. 2390 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2391 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2392 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2393 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2394 MachinePointerInfo(), false, false, 0); 2395 MemOps.push_back(Store); 2396 // Increment the address by the pointer size for the next argument to store 2397 SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT); 2398 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2399 } 2400 } 2401 2402 if (!MemOps.empty()) 2403 Chain = DAG.getNode(ISD::TokenFactor, dl, 2404 MVT::Other, &MemOps[0], MemOps.size()); 2405 2406 return Chain; 2407} 2408 2409SDValue 2410PPCTargetLowering::LowerFormalArguments_Darwin( 2411 SDValue Chain, 2412 CallingConv::ID CallConv, bool isVarArg, 2413 const SmallVectorImpl<ISD::InputArg> 2414 &Ins, 2415 SDLoc dl, SelectionDAG &DAG, 2416 SmallVectorImpl<SDValue> &InVals) const { 2417 // TODO: add description of PPC stack frame format, or at least some docs. 2418 // 2419 MachineFunction &MF = DAG.getMachineFunction(); 2420 MachineFrameInfo *MFI = MF.getFrameInfo(); 2421 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2422 2423 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2424 bool isPPC64 = PtrVT == MVT::i64; 2425 // Potential tail calls could cause overwriting of argument stack slots. 2426 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2427 (CallConv == CallingConv::Fast)); 2428 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2429 2430 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 2431 // Area that is at least reserved in caller of this function. 2432 unsigned MinReservedArea = ArgOffset; 2433 2434 static const uint16_t GPR_32[] = { // 32-bit registers. 2435 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2436 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2437 }; 2438 static const uint16_t GPR_64[] = { // 64-bit registers. 
2439 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 2440 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 2441 }; 2442 2443 static const uint16_t *FPR = GetFPR(); 2444 2445 static const uint16_t VR[] = { 2446 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 2447 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 2448 }; 2449 2450 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 2451 const unsigned Num_FPR_Regs = 13; 2452 const unsigned Num_VR_Regs = array_lengthof( VR); 2453 2454 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 2455 2456 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 2457 2458 // In 32-bit non-varargs functions, the stack space for vectors is after the 2459 // stack space for non-vectors. We do not use this space unless we have 2460 // too many vectors to fit in registers, something that only occurs in 2461 // constructed examples:), but we have to walk the arglist to figure 2462 // that out...for the pathological case, compute VecArgOffset as the 2463 // start of the vector parameter area. Computing VecArgOffset is the 2464 // entire point of the following loop. 2465 unsigned VecArgOffset = ArgOffset; 2466 if (!isVarArg && !isPPC64) { 2467 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 2468 ++ArgNo) { 2469 EVT ObjectVT = Ins[ArgNo].VT; 2470 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2471 2472 if (Flags.isByVal()) { 2473 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 2474 unsigned ObjSize = Flags.getByValSize(); 2475 unsigned ArgSize = 2476 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2477 VecArgOffset += ArgSize; 2478 continue; 2479 } 2480 2481 switch(ObjectVT.getSimpleVT().SimpleTy) { 2482 default: llvm_unreachable("Unhandled argument type!"); 2483 case MVT::i32: 2484 case MVT::f32: 2485 VecArgOffset += 4; 2486 break; 2487 case MVT::i64: // PPC64 2488 case MVT::f64: 2489 // FIXME: We are guaranteed to be !isPPC64 at this point. 2490 // Does MVT::i64 apply? 2491 VecArgOffset += 8; 2492 break; 2493 case MVT::v4f32: 2494 case MVT::v4i32: 2495 case MVT::v8i16: 2496 case MVT::v16i8: 2497 // Nothing to do, we're only looking at Nonvector args here. 2498 break; 2499 } 2500 } 2501 } 2502 // We've found where the vector parameter area in memory is. Skip the 2503 // first 12 parameters; these don't use that memory. 2504 VecArgOffset = ((VecArgOffset+15)/16)*16; 2505 VecArgOffset += 12*16; 2506 2507 // Add DAG nodes to load the arguments or copy them out of registers. On 2508 // entry to a function on PPC, the arguments start after the linkage area, 2509 // although the first ones are often in registers. 2510 2511 SmallVector<SDValue, 8> MemOps; 2512 unsigned nAltivecParamsAtEnd = 0; 2513 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 2514 unsigned CurArgIdx = 0; 2515 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 2516 SDValue ArgVal; 2517 bool needsLoad = false; 2518 EVT ObjectVT = Ins[ArgNo].VT; 2519 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 2520 unsigned ArgSize = ObjSize; 2521 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 2522 std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx); 2523 CurArgIdx = Ins[ArgNo].OrigArgIndex; 2524 2525 unsigned CurArgOffset = ArgOffset; 2526 2527 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 
2528 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 2529 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 2530 if (isVarArg || isPPC64) { 2531 MinReservedArea = ((MinReservedArea+15)/16)*16; 2532 MinReservedArea += CalculateStackSlotSize(ObjectVT, 2533 Flags, 2534 PtrByteSize); 2535 } else nAltivecParamsAtEnd++; 2536 } else 2537 // Calculate min reserved area. 2538 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 2539 Flags, 2540 PtrByteSize); 2541 2542 // FIXME the codegen can be much improved in some cases. 2543 // We do not have to keep everything in memory. 2544 if (Flags.isByVal()) { 2545 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 2546 ObjSize = Flags.getByValSize(); 2547 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2548 // Objects of size 1 and 2 are right justified, everything else is 2549 // left justified. This means the memory address is adjusted forwards. 2550 if (ObjSize==1 || ObjSize==2) { 2551 CurArgOffset = CurArgOffset + (4 - ObjSize); 2552 } 2553 // The value of the object is its address. 2554 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); 2555 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2556 InVals.push_back(FIN); 2557 if (ObjSize==1 || ObjSize==2) { 2558 if (GPR_idx != Num_GPR_Regs) { 2559 unsigned VReg; 2560 if (isPPC64) 2561 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2562 else 2563 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2564 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2565 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 2566 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 2567 MachinePointerInfo(FuncArg, 2568 CurArgOffset), 2569 ObjType, false, false, 0); 2570 MemOps.push_back(Store); 2571 ++GPR_idx; 2572 } 2573 2574 ArgOffset += PtrByteSize; 2575 2576 continue; 2577 } 2578 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 2579 // Store whatever pieces of the object are in registers 2580 // to memory. ArgOffset will be the address of the beginning 2581 // of the object. 2582 if (GPR_idx != Num_GPR_Regs) { 2583 unsigned VReg; 2584 if (isPPC64) 2585 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2586 else 2587 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2588 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 2589 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2590 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2591 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2592 MachinePointerInfo(FuncArg, ArgOffset), 2593 false, false, 0); 2594 MemOps.push_back(Store); 2595 ++GPR_idx; 2596 ArgOffset += PtrByteSize; 2597 } else { 2598 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 2599 break; 2600 } 2601 } 2602 continue; 2603 } 2604 2605 switch (ObjectVT.getSimpleVT().SimpleTy) { 2606 default: llvm_unreachable("Unhandled argument type!"); 2607 case MVT::i32: 2608 if (!isPPC64) { 2609 if (GPR_idx != Num_GPR_Regs) { 2610 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2611 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 2612 ++GPR_idx; 2613 } else { 2614 needsLoad = true; 2615 ArgSize = PtrByteSize; 2616 } 2617 // All int arguments reserve stack space in the Darwin ABI. 
2618 ArgOffset += PtrByteSize; 2619 break; 2620 } 2621 // FALLTHROUGH 2622 case MVT::i64: // PPC64 2623 if (GPR_idx != Num_GPR_Regs) { 2624 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2625 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2626 2627 if (ObjectVT == MVT::i32) 2628 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 2629 // value to MVT::i64 and then truncate to the correct register size. 2630 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 2631 2632 ++GPR_idx; 2633 } else { 2634 needsLoad = true; 2635 ArgSize = PtrByteSize; 2636 } 2637 // All int arguments reserve stack space in the Darwin ABI. 2638 ArgOffset += 8; 2639 break; 2640 2641 case MVT::f32: 2642 case MVT::f64: 2643 // Every 4 bytes of argument space consumes one of the GPRs available for 2644 // argument passing. 2645 if (GPR_idx != Num_GPR_Regs) { 2646 ++GPR_idx; 2647 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 2648 ++GPR_idx; 2649 } 2650 if (FPR_idx != Num_FPR_Regs) { 2651 unsigned VReg; 2652 2653 if (ObjectVT == MVT::f32) 2654 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 2655 else 2656 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 2657 2658 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2659 ++FPR_idx; 2660 } else { 2661 needsLoad = true; 2662 } 2663 2664 // All FP arguments reserve stack space in the Darwin ABI. 2665 ArgOffset += isPPC64 ? 8 : ObjSize; 2666 break; 2667 case MVT::v4f32: 2668 case MVT::v4i32: 2669 case MVT::v8i16: 2670 case MVT::v16i8: 2671 // Note that vector arguments in registers don't reserve stack space, 2672 // except in varargs functions. 2673 if (VR_idx != Num_VR_Regs) { 2674 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 2675 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 2676 if (isVarArg) { 2677 while ((ArgOffset % 16) != 0) { 2678 ArgOffset += PtrByteSize; 2679 if (GPR_idx != Num_GPR_Regs) 2680 GPR_idx++; 2681 } 2682 ArgOffset += 16; 2683 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 2684 } 2685 ++VR_idx; 2686 } else { 2687 if (!isVarArg && !isPPC64) { 2688 // Vectors go after all the nonvectors. 2689 CurArgOffset = VecArgOffset; 2690 VecArgOffset += 16; 2691 } else { 2692 // Vectors are aligned. 2693 ArgOffset = ((ArgOffset+15)/16)*16; 2694 CurArgOffset = ArgOffset; 2695 ArgOffset += 16; 2696 } 2697 needsLoad = true; 2698 } 2699 break; 2700 } 2701 2702 // We need to load the argument to a virtual register if we determined above 2703 // that we ran out of physical registers of the appropriate type. 2704 if (needsLoad) { 2705 int FI = MFI->CreateFixedObject(ObjSize, 2706 CurArgOffset + (ArgSize - ObjSize), 2707 isImmutable); 2708 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2709 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 2710 false, false, false, 0); 2711 } 2712 2713 InVals.push_back(ArgVal); 2714 } 2715 2716 // Set the size that is at least reserved in caller of this function. Tail 2717 // call optimized functions' reserved stack space needs to be aligned so that 2718 // taking the difference between two stack areas will result in an aligned 2719 // stack. 2720 setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64); 2721 2722 // If the function takes variable number of arguments, make a frame index for 2723 // the start of the first vararg value... for expansion of llvm.va_start. 
2724 if (isVarArg) { 2725 int Depth = ArgOffset; 2726 2727 FuncInfo->setVarArgsFrameIndex( 2728 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2729 Depth, true)); 2730 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2731 2732 // If this function is vararg, store any remaining integer argument regs 2733 // to their spots on the stack so that they may be loaded by dereferencing the 2734 // result of va_next. 2735 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 2736 unsigned VReg; 2737 2738 if (isPPC64) 2739 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 2740 else 2741 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 2742 2743 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2744 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2745 MachinePointerInfo(), false, false, 0); 2746 MemOps.push_back(Store); 2747 // Increment the address by the pointer size for the next argument to store 2748 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); 2749 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2750 } 2751 } 2752 2753 if (!MemOps.empty()) 2754 Chain = DAG.getNode(ISD::TokenFactor, dl, 2755 MVT::Other, &MemOps[0], MemOps.size()); 2756 2757 return Chain; 2758} 2759 2760/// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus 2761/// linkage area for the Darwin ABI, or the 64-bit SVR4 ABI. 2762static unsigned 2763CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, 2764 bool isPPC64, 2765 bool isVarArg, 2766 unsigned CC, 2767 const SmallVectorImpl<ISD::OutputArg> 2768 &Outs, 2769 const SmallVectorImpl<SDValue> &OutVals, 2770 unsigned &nAltivecParamsAtEnd) { 2771 // Count how many bytes are to be pushed on the stack, including the linkage 2772 // area and the parameter passing area. We start with 24/48 bytes, which is 2773 // prereserved space for [SP][CR][LR][3 x unused]. 2774 unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true); 2775 unsigned NumOps = Outs.size(); 2776 unsigned PtrByteSize = isPPC64 ? 8 : 4; 2777 2778 // Add up all the space actually used. 2779 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 2780 // they all go in registers, but we must reserve stack space for them for 2781 // possible use by the caller. In varargs or 64-bit calls, parameters are 2782 // assigned stack space in order, with padding so Altivec parameters are 2783 // 16-byte aligned. 2784 nAltivecParamsAtEnd = 0; 2785 for (unsigned i = 0; i != NumOps; ++i) { 2786 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2787 EVT ArgVT = Outs[i].VT; 2788 // Varargs Altivec parameters are padded to a 16 byte boundary. 2789 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || 2790 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) { 2791 if (!isVarArg && !isPPC64) { 2792 // Non-varargs Altivec parameters go after all the non-Altivec 2793 // parameters; handle those later so we know how much padding we need. 2794 nAltivecParamsAtEnd++; 2795 continue; 2796 } 2797 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 2798 NumBytes = ((NumBytes+15)/16)*16; 2799 } 2800 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2801 } 2802 2803 // Allow for Altivec parameters at the end, if needed. 2804 if (nAltivecParamsAtEnd) { 2805 NumBytes = ((NumBytes+15)/16)*16; 2806 NumBytes += 16*nAltivecParamsAtEnd; 2807 } 2808 2809 // The prolog code of the callee may store up to 8 GPR argument registers to 2810 // the stack, allowing va_start to index over them in memory if it's varargs. 
2811 // Because we cannot tell if this is needed on the caller side, we have to 2812 // conservatively assume that it is needed. As such, make sure we have at 2813 // least enough stack space for the caller to store the 8 GPRs. 2814 NumBytes = std::max(NumBytes, 2815 PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); 2816 2817 // Tail call needs the stack to be aligned. 2818 if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){ 2819 unsigned TargetAlign = DAG.getMachineFunction().getTarget(). 2820 getFrameLowering()->getStackAlignment(); 2821 unsigned AlignMask = TargetAlign-1; 2822 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2823 } 2824 2825 return NumBytes; 2826} 2827 2828/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 2829/// adjusted to accommodate the arguments for the tailcall. 2830static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 2831 unsigned ParamSize) { 2832 2833 if (!isTailCall) return 0; 2834 2835 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 2836 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 2837 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 2838 // Remember only if the new adjustment is bigger. 2839 if (SPDiff < FI->getTailCallSPDelta()) 2840 FI->setTailCallSPDelta(SPDiff); 2841 2842 return SPDiff; 2843} 2844 2845/// IsEligibleForTailCallOptimization - Check whether the call is eligible 2846/// for tail call optimization. Targets which want to do tail call 2847/// optimization should implement this function. 2848bool 2849PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2850 CallingConv::ID CalleeCC, 2851 bool isVarArg, 2852 const SmallVectorImpl<ISD::InputArg> &Ins, 2853 SelectionDAG& DAG) const { 2854 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 2855 return false; 2856 2857 // Variable argument functions are not supported. 2858 if (isVarArg) 2859 return false; 2860 2861 MachineFunction &MF = DAG.getMachineFunction(); 2862 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 2863 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 2864 // Functions containing by val parameters are not supported. 2865 for (unsigned i = 0; i != Ins.size(); i++) { 2866 ISD::ArgFlagsTy Flags = Ins[i].Flags; 2867 if (Flags.isByVal()) return false; 2868 } 2869 2870 // Non-PIC/GOT tail calls are supported. 2871 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 2872 return true; 2873 2874 // At the moment we can only do local tail calls (in same module, hidden 2875 // or protected) if we are generating PIC. 2876 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2877 return G->getGlobal()->hasHiddenVisibility() 2878 || G->getGlobal()->hasProtectedVisibility(); 2879 } 2880 2881 return false; 2882} 2883 2884/// isBLACompatibleAddress - Return the immediate to use if the specified 2885/// 32-bit value is representable in the immediate field of a BxA instruction. 2886static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 2887 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2888 if (!C) return 0; 2889 2890 int Addr = C->getZExtValue(); 2891 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 2892 SignExtend32<26>(Addr) != Addr) 2893 return 0; // Top 6 bits have to be sext of immediate. 
2894 2895 return DAG.getConstant((int)C->getZExtValue() >> 2, 2896 DAG.getTargetLoweringInfo().getPointerTy()).getNode(); 2897} 2898 2899namespace { 2900 2901struct TailCallArgumentInfo { 2902 SDValue Arg; 2903 SDValue FrameIdxOp; 2904 int FrameIdx; 2905 2906 TailCallArgumentInfo() : FrameIdx(0) {} 2907}; 2908 2909} 2910 2911/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slots. 2912static void 2913StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 2914 SDValue Chain, 2915 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs, 2916 SmallVector<SDValue, 8> &MemOpChains, 2917 SDLoc dl) { 2918 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 2919 SDValue Arg = TailCallArgs[i].Arg; 2920 SDValue FIN = TailCallArgs[i].FrameIdxOp; 2921 int FI = TailCallArgs[i].FrameIdx; 2922 // Store relative to the frame pointer. 2923 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN, 2924 MachinePointerInfo::getFixedStack(FI), 2925 false, false, 0)); 2926 } 2927} 2928 2929/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 2930/// the appropriate stack slot for the tail call optimized function call. 2931static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 2932 MachineFunction &MF, 2933 SDValue Chain, 2934 SDValue OldRetAddr, 2935 SDValue OldFP, 2936 int SPDiff, 2937 bool isPPC64, 2938 bool isDarwinABI, 2939 SDLoc dl) { 2940 if (SPDiff) { 2941 // Calculate the new stack slot for the return address. 2942 int SlotSize = isPPC64 ? 8 : 4; 2943 int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64, 2944 isDarwinABI); 2945 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 2946 NewRetAddrLoc, true); 2947 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2948 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 2949 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, 2950 MachinePointerInfo::getFixedStack(NewRetAddr), 2951 false, false, 0); 2952 2953 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 2954 // slot as the FP is never overwritten. 2955 if (isDarwinABI) { 2956 int NewFPLoc = 2957 SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI); 2958 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 2959 true); 2960 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 2961 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, 2962 MachinePointerInfo::getFixedStack(NewFPIdx), 2963 false, false, 0); 2964 } 2965 } 2966 return Chain; 2967} 2968 2969/// CalculateTailCallArgDest - Remember the argument for later processing. Calculate 2970/// the position of the argument. 2971static void 2972CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 2973 SDValue Arg, int SPDiff, unsigned ArgOffset, 2974 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { 2975 int Offset = ArgOffset + SPDiff; 2976 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 2977 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2978 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 2979 SDValue FIN = DAG.getFrameIndex(FI, VT); 2980 TailCallArgumentInfo Info; 2981 Info.Arg = Arg; 2982 Info.FrameIdxOp = FIN; 2983 Info.FrameIdx = FI; 2984 TailCallArguments.push_back(Info); 2985} 2986 2987/// EmitTailCallLoadFPAndRetAddr - Emit loads of the return address and frame 2988/// pointer from their stack slots. Returns the chain as result and the loaded 2989/// values in LROpOut/FPOpOut. Used when tail calling. 
2990SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 2991 int SPDiff, 2992 SDValue Chain, 2993 SDValue &LROpOut, 2994 SDValue &FPOpOut, 2995 bool isDarwinABI, 2996 SDLoc dl) const { 2997 if (SPDiff) { 2998 // Load the LR and FP stack slot for later adjusting. 2999 EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; 3000 LROpOut = getReturnAddrFrameIndex(DAG); 3001 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 3002 false, false, false, 0); 3003 Chain = SDValue(LROpOut.getNode(), 1); 3004 3005 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 3006 // slot as the FP is never overwritten. 3007 if (isDarwinABI) { 3008 FPOpOut = getFramePointerFrameIndex(DAG); 3009 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 3010 false, false, false, 0); 3011 Chain = SDValue(FPOpOut.getNode(), 1); 3012 } 3013 } 3014 return Chain; 3015} 3016 3017/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 3018/// by "Src" to address "Dst" of size "Size". Alignment information is 3019/// specified by the specific parameter attribute. The copy will be passed as 3020/// a byval function parameter. 3021/// Sometimes what we are copying is the end of a larger object, the part that 3022/// does not fit in registers. 3023static SDValue 3024CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 3025 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 3026 SDLoc dl) { 3027 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 3028 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 3029 false, false, MachinePointerInfo(0), 3030 MachinePointerInfo(0)); 3031} 3032 3033/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 3034/// tail calls. 3035static void 3036LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 3037 SDValue Arg, SDValue PtrOff, int SPDiff, 3038 unsigned ArgOffset, bool isPPC64, bool isTailCall, 3039 bool isVector, SmallVector<SDValue, 8> &MemOpChains, 3040 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments, 3041 SDLoc dl) { 3042 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3043 if (!isTailCall) { 3044 if (isVector) { 3045 SDValue StackPtr; 3046 if (isPPC64) 3047 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 3048 else 3049 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3050 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3051 DAG.getConstant(ArgOffset, PtrVT)); 3052 } 3053 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3054 MachinePointerInfo(), false, false, 0)); 3055 // Calculate and remember argument location. 3056 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 3057 TailCallArguments); 3058} 3059 3060static 3061void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 3062 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 3063 SDValue LROp, SDValue FPOp, bool isDarwinABI, 3064 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) { 3065 MachineFunction &MF = DAG.getMachineFunction(); 3066 3067 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 3068 // might overwrite each other in case of tail call optimization. 3069 SmallVector<SDValue, 8> MemOpChains2; 3070 // Do not flag preceding copytoreg stuff together with the following stuff. 
3071 InFlag = SDValue(); 3072 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 3073 MemOpChains2, dl); 3074 if (!MemOpChains2.empty()) 3075 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3076 &MemOpChains2[0], MemOpChains2.size()); 3077 3078 // Store the return address to the appropriate stack slot. 3079 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 3080 isPPC64, isDarwinABI, dl); 3081 3082 // Emit callseq_end just before tailcall node. 3083 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3084 DAG.getIntPtrConstant(0, true), InFlag, dl); 3085 InFlag = Chain.getValue(1); 3086} 3087 3088static 3089unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 3090 SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall, 3091 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, 3092 SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, 3093 const PPCSubtarget &PPCSubTarget) { 3094 3095 bool isPPC64 = PPCSubTarget.isPPC64(); 3096 bool isSVR4ABI = PPCSubTarget.isSVR4ABI(); 3097 3098 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 3099 NodeTys.push_back(MVT::Other); // Returns a chain 3100 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 3101 3102 unsigned CallOpc = PPCISD::CALL; 3103 3104 bool needIndirectCall = true; 3105 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 3106 // If this is an absolute destination address, use the munged value. 3107 Callee = SDValue(Dest, 0); 3108 needIndirectCall = false; 3109 } 3110 3111 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 3112 // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201 3113 // Use indirect calls for ALL functions calls in JIT mode, since the 3114 // far-call stubs may be outside relocation limits for a BL instruction. 3115 if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) { 3116 unsigned OpFlags = 0; 3117 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 3118 (PPCSubTarget.getTargetTriple().isMacOSX() && 3119 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 3120 (G->getGlobal()->isDeclaration() || 3121 G->getGlobal()->isWeakForLinker())) { 3122 // PC-relative references to external symbols should go through $stub, 3123 // unless we're building with the leopard linker or later, which 3124 // automatically synthesizes these stubs. 3125 OpFlags = PPCII::MO_DARWIN_STUB; 3126 } 3127 3128 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 3129 // every direct call is) turn it into a TargetGlobalAddress / 3130 // TargetExternalSymbol node so that legalize doesn't hack it. 3131 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 3132 Callee.getValueType(), 3133 0, OpFlags); 3134 needIndirectCall = false; 3135 } 3136 } 3137 3138 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 3139 unsigned char OpFlags = 0; 3140 3141 if (DAG.getTarget().getRelocationModel() != Reloc::Static && 3142 (PPCSubTarget.getTargetTriple().isMacOSX() && 3143 PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) { 3144 // PC-relative references to external symbols should go through $stub, 3145 // unless we're building with the leopard linker or later, which 3146 // automatically synthesizes these stubs. 
3147 OpFlags = PPCII::MO_DARWIN_STUB; 3148 } 3149 3150 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 3151 OpFlags); 3152 needIndirectCall = false; 3153 } 3154 3155 if (needIndirectCall) { 3156 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 3157 // to do the call, we can't use PPCISD::CALL. 3158 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 3159 3160 if (isSVR4ABI && isPPC64) { 3161 // Function pointers in the 64-bit SVR4 ABI do not point to the function 3162 // entry point, but to the function descriptor (the function entry point 3163 // address is part of the function descriptor though). 3164 // The function descriptor is a three doubleword structure with the 3165 // following fields: function entry point, TOC base address and 3166 // environment pointer. 3167 // Thus for a call through a function pointer, the following actions need 3168 // to be performed: 3169 // 1. Save the TOC of the caller in the TOC save area of its stack 3170 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 3171 // 2. Load the address of the function entry point from the function 3172 // descriptor. 3173 // 3. Load the TOC of the callee from the function descriptor into r2. 3174 // 4. Load the environment pointer from the function descriptor into 3175 // r11. 3176 // 5. Branch to the function entry point address. 3177 // 6. On return of the callee, the TOC of the caller needs to be 3178 // restored (this is done in FinishCall()). 3179 // 3180 // All those operations are flagged together to ensure that no other 3181 // operations can be scheduled in between. E.g. without flagging the 3182 // operations together, a TOC access in the caller could be scheduled 3183 // between the load of the callee TOC and the branch to the callee, which 3184 // results in the TOC access going through the TOC of the callee instead 3185 // of going through the TOC of the caller, which leads to incorrect code. 3186 3187 // Load the address of the function entry point from the function 3188 // descriptor. 3189 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue); 3190 SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps, 3191 InFlag.getNode() ? 3 : 2); 3192 Chain = LoadFuncPtr.getValue(1); 3193 InFlag = LoadFuncPtr.getValue(2); 3194 3195 // Load environment pointer into r11. 3196 // Offset of the environment pointer within the function descriptor. 3197 SDValue PtrOff = DAG.getIntPtrConstant(16); 3198 3199 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 3200 SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, 3201 InFlag); 3202 Chain = LoadEnvPtr.getValue(1); 3203 InFlag = LoadEnvPtr.getValue(2); 3204 3205 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 3206 InFlag); 3207 Chain = EnvVal.getValue(0); 3208 InFlag = EnvVal.getValue(1); 3209 3210 // Load TOC of the callee into r2. We are using a target-specific load 3211 // with r2 hard coded, because the result of a target-independent load 3212 // would never go directly into r2, since r2 is a reserved register (which 3213 // prevents the register allocator from allocating it), resulting in an 3214 // additional register being allocated and an unnecessary move instruction 3215 // being generated. 
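      // For illustration only (FuncDesc is a hypothetical type, not one used
      // by this file), the descriptor accessed here corresponds to:
      //   struct FuncDesc {
      //     uint64_t EntryPoint; // +0:  loaded above and moved to the CTR
      //     uint64_t TOCBase;    // +8:  loaded into r2 by LOAD_TOC below
      //     uint64_t EnvPtr;     // +16: loaded into r11 above (PtrOff == 16)
      //   };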
3216 VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3217 SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, 3218 Callee, InFlag); 3219 Chain = LoadTOCPtr.getValue(0); 3220 InFlag = LoadTOCPtr.getValue(1); 3221 3222 MTCTROps[0] = Chain; 3223 MTCTROps[1] = LoadFuncPtr; 3224 MTCTROps[2] = InFlag; 3225 } 3226 3227 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 3228 2 + (InFlag.getNode() != 0)); 3229 InFlag = Chain.getValue(1); 3230 3231 NodeTys.clear(); 3232 NodeTys.push_back(MVT::Other); 3233 NodeTys.push_back(MVT::Glue); 3234 Ops.push_back(Chain); 3235 CallOpc = PPCISD::BCTRL; 3236 Callee.setNode(0); 3237 // Add use of X11 (holding environment pointer) 3238 if (isSVR4ABI && isPPC64) 3239 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 3240 // Add CTR register as callee so a bctr can be emitted later. 3241 if (isTailCall) 3242 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 3243 } 3244 3245 // If this is a direct call, pass the chain and the callee. 3246 if (Callee.getNode()) { 3247 Ops.push_back(Chain); 3248 Ops.push_back(Callee); 3249 } 3250 // If this is a tail call add stack pointer delta. 3251 if (isTailCall) 3252 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32)); 3253 3254 // Add argument registers to the end of the list so that they are known live 3255 // into the call. 3256 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 3257 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 3258 RegsToPass[i].second.getValueType())); 3259 3260 return CallOpc; 3261} 3262 3263static 3264bool isLocalCall(const SDValue &Callee) 3265{ 3266 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3267 return !G->getGlobal()->isDeclaration() && 3268 !G->getGlobal()->isWeakForLinker(); 3269 return false; 3270} 3271 3272SDValue 3273PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 3274 CallingConv::ID CallConv, bool isVarArg, 3275 const SmallVectorImpl<ISD::InputArg> &Ins, 3276 SDLoc dl, SelectionDAG &DAG, 3277 SmallVectorImpl<SDValue> &InVals) const { 3278 3279 SmallVector<CCValAssign, 16> RVLocs; 3280 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3281 getTargetMachine(), RVLocs, *DAG.getContext()); 3282 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 3283 3284 // Copy all of the result registers out of their specified physreg. 
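  // Note that each CopyFromReg below threads the glue result (value #2) of
  // the previous copy back in as InFlag, which keeps the register reads glued
  // immediately after the call node so the scheduler cannot separate them
  // from it.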
3285 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 3286 CCValAssign &VA = RVLocs[i]; 3287 assert(VA.isRegLoc() && "Can only return in registers!"); 3288 3289 SDValue Val = DAG.getCopyFromReg(Chain, dl, 3290 VA.getLocReg(), VA.getLocVT(), InFlag); 3291 Chain = Val.getValue(1); 3292 InFlag = Val.getValue(2); 3293 3294 switch (VA.getLocInfo()) { 3295 default: llvm_unreachable("Unknown loc info!"); 3296 case CCValAssign::Full: break; 3297 case CCValAssign::AExt: 3298 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3299 break; 3300 case CCValAssign::ZExt: 3301 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 3302 DAG.getValueType(VA.getValVT())); 3303 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3304 break; 3305 case CCValAssign::SExt: 3306 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 3307 DAG.getValueType(VA.getValVT())); 3308 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 3309 break; 3310 } 3311 3312 InVals.push_back(Val); 3313 } 3314 3315 return Chain; 3316} 3317 3318SDValue 3319PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, 3320 bool isTailCall, bool isVarArg, 3321 SelectionDAG &DAG, 3322 SmallVector<std::pair<unsigned, SDValue>, 8> 3323 &RegsToPass, 3324 SDValue InFlag, SDValue Chain, 3325 SDValue &Callee, 3326 int SPDiff, unsigned NumBytes, 3327 const SmallVectorImpl<ISD::InputArg> &Ins, 3328 SmallVectorImpl<SDValue> &InVals) const { 3329 std::vector<EVT> NodeTys; 3330 SmallVector<SDValue, 8> Ops; 3331 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, 3332 isTailCall, RegsToPass, Ops, NodeTys, 3333 PPCSubTarget); 3334 3335 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 3336 if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) 3337 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 3338 3339 // When performing tail call optimization the callee pops its arguments off 3340 // the stack. Account for this here so these bytes can be pushed back on in 3341 // PPCFrameLowering::eliminateCallFramePseudoInstr. 3342 int BytesCalleePops = 3343 (CallConv == CallingConv::Fast && 3344 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 3345 3346 // Add a register mask operand representing the call-preserved registers. 3347 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 3348 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 3349 assert(Mask && "Missing call preserved mask for calling convention"); 3350 Ops.push_back(DAG.getRegisterMask(Mask)); 3351 3352 if (InFlag.getNode()) 3353 Ops.push_back(InFlag); 3354 3355 // Emit tail call. 3356 if (isTailCall) { 3357 assert(((Callee.getOpcode() == ISD::Register && 3358 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 3359 Callee.getOpcode() == ISD::TargetExternalSymbol || 3360 Callee.getOpcode() == ISD::TargetGlobalAddress || 3361 isa<ConstantSDNode>(Callee)) && 3362 "Expecting an global address, external symbol, absolute value or register"); 3363 3364 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size()); 3365 } 3366 3367 // Add a NOP immediately after the branch instruction when using the 64-bit 3368 // SVR4 ABI. At link time, if caller and callee are in a different module and 3369 // thus have a different TOC, the call will be replaced with a call to a stub 3370 // function which saves the current TOC, loads the TOC of the callee and 3371 // branches to the callee. 
The NOP will be replaced with a load instruction 3372 // which restores the TOC of the caller from the TOC save slot of the current 3373 // stack frame. If caller and callee belong to the same module (and have the 3374 // same TOC), the NOP will remain unchanged. 3375 3376 bool needsTOCRestore = false; 3377 if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) { 3378 if (CallOpc == PPCISD::BCTRL) { 3379 // This is a call through a function pointer. 3380 // Restore the caller TOC from the save area into R2. 3381 // See PrepareCall() for more information about calls through function 3382 // pointers in the 64-bit SVR4 ABI. 3383 // We are using a target-specific load with r2 hard coded, because the 3384 // result of a target-independent load would never go directly into r2, 3385 // since r2 is a reserved register (which prevents the register allocator 3386 // from allocating it), resulting in an additional register being 3387 // allocated and an unnecessary move instruction being generated. 3388 needsTOCRestore = true; 3389 } else if ((CallOpc == PPCISD::CALL) && !isLocalCall(Callee)) { 3390 // Otherwise insert NOP for non-local calls. 3391 CallOpc = PPCISD::CALL_NOP; 3392 } 3393 } 3394 3395 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size()); 3396 InFlag = Chain.getValue(1); 3397 3398 if (needsTOCRestore) { 3399 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 3400 Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag); 3401 InFlag = Chain.getValue(1); 3402 } 3403 3404 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 3405 DAG.getIntPtrConstant(BytesCalleePops, true), 3406 InFlag, dl); 3407 if (!Ins.empty()) 3408 InFlag = Chain.getValue(1); 3409 3410 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 3411 Ins, dl, DAG, InVals); 3412} 3413 3414SDValue 3415PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 3416 SmallVectorImpl<SDValue> &InVals) const { 3417 SelectionDAG &DAG = CLI.DAG; 3418 SDLoc &dl = CLI.DL; 3419 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 3420 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 3421 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 3422 SDValue Chain = CLI.Chain; 3423 SDValue Callee = CLI.Callee; 3424 bool &isTailCall = CLI.IsTailCall; 3425 CallingConv::ID CallConv = CLI.CallConv; 3426 bool isVarArg = CLI.IsVarArg; 3427 3428 if (isTailCall) 3429 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 3430 Ins, DAG); 3431 3432 if (PPCSubTarget.isSVR4ABI()) { 3433 if (PPCSubTarget.isPPC64()) 3434 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 3435 isTailCall, Outs, OutVals, Ins, 3436 dl, DAG, InVals); 3437 else 3438 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 3439 isTailCall, Outs, OutVals, Ins, 3440 dl, DAG, InVals); 3441 } 3442 3443 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 3444 isTailCall, Outs, OutVals, Ins, 3445 dl, DAG, InVals); 3446} 3447 3448SDValue 3449PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, 3450 CallingConv::ID CallConv, bool isVarArg, 3451 bool isTailCall, 3452 const SmallVectorImpl<ISD::OutputArg> &Outs, 3453 const SmallVectorImpl<SDValue> &OutVals, 3454 const SmallVectorImpl<ISD::InputArg> &Ins, 3455 SDLoc dl, SelectionDAG &DAG, 3456 SmallVectorImpl<SDValue> &InVals) const { 3457 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 3458 // of the 32-bit SVR4 ABI stack frame layout. 
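  // As a rough sketch (the description referenced above is authoritative),
  // the outgoing area assembled below looks like, from 0(SP) upward:
  //   [ 8-byte linkage area: back chain word, LR save word ]
  //   [ parameter list area laid out by CC_PPC32_SVR4      ]
  //   [ local copies of aggregates passed by value         ]
  // NumBytes, computed below, is the total size of all three pieces.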
3459
3460  assert((CallConv == CallingConv::C ||
3461          CallConv == CallingConv::Fast) && "Unknown calling convention!");
3462
3463  unsigned PtrByteSize = 4;
3464
3465  MachineFunction &MF = DAG.getMachineFunction();
3466
3467  // Mark this function as potentially containing a tail call. As a
3468  // consequence the frame pointer will be used for dynamic allocations and
3469  // for restoring the caller's stack pointer in this function's epilog. This
3470  // is done because the tail-called function might overwrite the value in
3471  // this function's (MF) stack pointer stack slot 0(SP).
3472  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
3473      CallConv == CallingConv::Fast)
3474    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
3475
3476  // Count how many bytes are to be pushed on the stack, including the linkage
3477  // area, parameter list area and the part of the local variable space which
3478  // contains copies of aggregates which are passed by value.
3479
3480  // Assign locations to all of the outgoing arguments.
3481  SmallVector<CCValAssign, 16> ArgLocs;
3482  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3483                 getTargetMachine(), ArgLocs, *DAG.getContext());
3484
3485  // Reserve space for the linkage area on the stack.
3486  CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
3487
3488  if (isVarArg) {
3489    // Handle fixed and variable vector arguments differently.
3490    // Fixed vector arguments go into registers as long as registers are
3491    // available. Variable vector arguments always go into memory.
3492    unsigned NumArgs = Outs.size();
3493
3494    for (unsigned i = 0; i != NumArgs; ++i) {
3495      MVT ArgVT = Outs[i].VT;
3496      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
3497      bool Result;
3498
3499      if (Outs[i].IsFixed) {
3500        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
3501                               CCInfo);
3502      } else {
3503        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
3504                                      ArgFlags, CCInfo);
3505      }
3506
3507      if (Result) {
3508#ifndef NDEBUG
3509        errs() << "Call operand #" << i << " has unhandled type "
3510               << EVT(ArgVT).getEVTString() << "\n";
3511#endif
3512        llvm_unreachable(0);
3513      }
3514    }
3515  } else {
3516    // All arguments are treated the same.
3517    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
3518  }
3519
3520  // Assign locations to all of the outgoing aggregate by value arguments.
3521  SmallVector<CCValAssign, 16> ByValArgLocs;
3522  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3523                      getTargetMachine(), ByValArgLocs, *DAG.getContext());
3524
3525  // Reserve stack space for the allocations in CCInfo.
3526  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3527
3528  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
3529
3530  // Size of the linkage area, parameter list area and the part of the local
3531  // variable space which holds copies of aggregates which are passed by
3532  // value.
3533  unsigned NumBytes = CCByValInfo.getNextStackOffset();
3534
3535  // Calculate by how many bytes the stack has to be adjusted in case of tail
3536  // call optimization.
3537  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
3538
3539  // Adjust the stack pointer for the new arguments...
3540 // These operations are automatically eliminated by the prolog/epilog pass 3541 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 3542 dl); 3543 SDValue CallSeqStart = Chain; 3544 3545 // Load the return address and frame pointer so it can be moved somewhere else 3546 // later. 3547 SDValue LROp, FPOp; 3548 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 3549 dl); 3550 3551 // Set up a copy of the stack pointer for use loading and storing any 3552 // arguments that may not fit in the registers available for argument 3553 // passing. 3554 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 3555 3556 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 3557 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 3558 SmallVector<SDValue, 8> MemOpChains; 3559 3560 bool seenFloatArg = false; 3561 // Walk the register/memloc assignments, inserting copies/loads. 3562 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 3563 i != e; 3564 ++i) { 3565 CCValAssign &VA = ArgLocs[i]; 3566 SDValue Arg = OutVals[i]; 3567 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3568 3569 if (Flags.isByVal()) { 3570 // Argument is an aggregate which is passed by value, thus we need to 3571 // create a copy of it in the local variable space of the current stack 3572 // frame (which is the stack frame of the caller) and pass the address of 3573 // this copy to the callee. 3574 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 3575 CCValAssign &ByValVA = ByValArgLocs[j++]; 3576 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 3577 3578 // Memory reserved in the local variable space of the callers stack frame. 3579 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 3580 3581 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3582 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3583 3584 // Create a copy of the argument in the local area of the current 3585 // stack frame. 3586 SDValue MemcpyCall = 3587 CreateCopyOfByValArgument(Arg, PtrOff, 3588 CallSeqStart.getNode()->getOperand(0), 3589 Flags, DAG, dl); 3590 3591 // This must go outside the CALLSEQ_START..END. 3592 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 3593 CallSeqStart.getNode()->getOperand(1), 3594 SDLoc(MemcpyCall)); 3595 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 3596 NewCallSeqStart.getNode()); 3597 Chain = CallSeqStart = NewCallSeqStart; 3598 3599 // Pass the address of the aggregate copy on the stack either in a 3600 // physical register or in the parameter list area of the current stack 3601 // frame to the callee. 3602 Arg = PtrOff; 3603 } 3604 3605 if (VA.isRegLoc()) { 3606 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 3607 // Put argument in a physical register. 3608 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 3609 } else { 3610 // Put argument in the parameter list area of the current stack frame. 3611 assert(VA.isMemLoc()); 3612 unsigned LocMemOffset = VA.getLocMemOffset(); 3613 3614 if (!isTailCall) { 3615 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 3616 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 3617 3618 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 3619 MachinePointerInfo(), 3620 false, false, 0)); 3621 } else { 3622 // Calculate and remember argument location. 
3623        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
3624                                 TailCallArguments);
3625      }
3626    }
3627  }
3628
3629  if (!MemOpChains.empty())
3630    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3631                        &MemOpChains[0], MemOpChains.size());
3632
3633  // Build a sequence of copy-to-reg nodes chained together with token chain
3634  // and flag operands which copy the outgoing args into the appropriate regs.
3635  SDValue InFlag;
3636  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3637    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3638                             RegsToPass[i].second, InFlag);
3639    InFlag = Chain.getValue(1);
3640  }
3641
3642  // Set CR bit 6 to true if this is a vararg call with floating args passed in
3643  // registers.
3644  if (isVarArg) {
3645    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
3646    SDValue Ops[] = { Chain, InFlag };
3647
3648    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
3649                        dl, VTs, Ops, InFlag.getNode() ? 2 : 1);
3650
3651    InFlag = Chain.getValue(1);
3652  }
3653
3654  if (isTailCall)
3655    PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
3656                    false, TailCallArguments);
3657
3658  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
3659                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
3660                    Ins, InVals);
3661}
3662
3663// Copy an argument into memory, being careful to do this outside the
3664// call sequence for the call to which the argument belongs.
3665SDValue
3666PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
3667                                              SDValue CallSeqStart,
3668                                              ISD::ArgFlagsTy Flags,
3669                                              SelectionDAG &DAG,
3670                                              SDLoc dl) const {
3671  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
3672                        CallSeqStart.getNode()->getOperand(0),
3673                        Flags, DAG, dl);
3674  // The MEMCPY must go outside the CALLSEQ_START..END.
3675  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
3676                             CallSeqStart.getNode()->getOperand(1),
3677                             SDLoc(MemcpyCall));
3678  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
3679                         NewCallSeqStart.getNode());
3680  return NewCallSeqStart;
3681}
3682
3683SDValue
3684PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
3685                                    CallingConv::ID CallConv, bool isVarArg,
3686                                    bool isTailCall,
3687                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
3688                                    const SmallVectorImpl<SDValue> &OutVals,
3689                                    const SmallVectorImpl<ISD::InputArg> &Ins,
3690                                    SDLoc dl, SelectionDAG &DAG,
3691                                    SmallVectorImpl<SDValue> &InVals) const {
3692
3693  unsigned NumOps = Outs.size();
3694
3695  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3696  unsigned PtrByteSize = 8;
3697
3698  MachineFunction &MF = DAG.getMachineFunction();
3699
3700  // Mark this function as potentially containing a tail call. As a
3701  // consequence the frame pointer will be used for dynamic allocations and
3702  // for restoring the caller's stack pointer in this function's epilog. This
3703  // is done because the tail-called function might overwrite the value in
3704  // this function's (MF) stack pointer stack slot 0(SP).
3705  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
3706      CallConv == CallingConv::Fast)
3707    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
3708
3709  unsigned nAltivecParamsAtEnd = 0;
3710
3711  // Count how many bytes are to be pushed on the stack, including the linkage
3712  // area, and parameter passing area. We start with at least 48 bytes, which
3713  // is reserved space for [SP][CR][LR][3 x unused].
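  // For reference (a sketch from the ABI convention; PPCFrameLowering is
  // authoritative), those 48 bytes are six doublewords:
  //   0(SP) back chain, 8(SP) CR save, 16(SP) LR save,
  //   24(SP)/32(SP) reserved, 40(SP) TOC save,
  // which is why the indirect-call path later stores r2 at offset 40.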
3714  // NOTE: For PPC64, nAltivecParamsAtEnd always remains zero as a result
3715  // of this call.
3716  unsigned NumBytes =
3717    CalculateParameterAndLinkageAreaSize(DAG, true, isVarArg, CallConv,
3718                                         Outs, OutVals, nAltivecParamsAtEnd);
3719
3720  // Calculate by how many bytes the stack has to be adjusted in case of tail
3721  // call optimization.
3722  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
3723
3724  // To protect arguments on the stack from being clobbered in a tail call,
3725  // force all the loads to happen before doing any other lowering.
3726  if (isTailCall)
3727    Chain = DAG.getStackArgumentTokenFactor(Chain);
3728
3729  // Adjust the stack pointer for the new arguments...
3730  // These operations are automatically eliminated by the prolog/epilog pass
3731  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
3732                               dl);
3733  SDValue CallSeqStart = Chain;
3734
3735  // Load the return address and frame pointer so they can be moved somewhere
3736  // else later.
3737  SDValue LROp, FPOp;
3738  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
3739                                       dl);
3740
3741  // Set up a copy of the stack pointer for use loading and storing any
3742  // arguments that may not fit in the registers available for argument
3743  // passing.
3744  SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
3745
3746  // Figure out which arguments are going to go in registers, and which in
3747  // memory. Also, if this is a vararg function, floating point arguments
3748  // must be stored to the stack, and loaded into integer regs as well, if
3749  // any integer regs are available for argument passing.
3750  unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true);
3751  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3752
3753  static const uint16_t GPR[] = {
3754    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3755    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3756  };
3757  static const uint16_t *FPR = GetFPR();
3758
3759  static const uint16_t VR[] = {
3760    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3761    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3762  };
3763  const unsigned NumGPRs = array_lengthof(GPR);
3764  const unsigned NumFPRs = 13;
3765  const unsigned NumVRs = array_lengthof(VR);
3766
3767  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3768  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
3769
3770  SmallVector<SDValue, 8> MemOpChains;
3771  for (unsigned i = 0; i != NumOps; ++i) {
3772    SDValue Arg = OutVals[i];
3773    ISD::ArgFlagsTy Flags = Outs[i].Flags;
3774
3775    // PtrOff will be used to store the current argument to the stack if a
3776    // register cannot be found for it.
3777    SDValue PtrOff;
3778
3779    PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
3780
3781    PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
3782
3783    // Promote integers to 64-bit values.
3784    if (Arg.getValueType() == MVT::i32) {
3785      // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
3786      unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3787      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
3788    }
3789
3790    // FIXME: memcpy is used way more than necessary. Correctness first.
3791    // Note: "by value" is code for passing a structure by value, not
3792    // basic types.
3793    if (Flags.isByVal()) {
3794      // Note: Size includes alignment padding, so
3795      // struct x { short a; char b; }
3796      // will have Size = 4. With #pragma pack(1), it will have Size = 3.
3797 // These are the proper values we need for right-justifying the 3798 // aggregate in a parameter register. 3799 unsigned Size = Flags.getByValSize(); 3800 3801 // An empty aggregate parameter takes up no storage and no 3802 // registers. 3803 if (Size == 0) 3804 continue; 3805 3806 // All aggregates smaller than 8 bytes must be passed right-justified. 3807 if (Size==1 || Size==2 || Size==4) { 3808 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 3809 if (GPR_idx != NumGPRs) { 3810 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 3811 MachinePointerInfo(), VT, 3812 false, false, 0); 3813 MemOpChains.push_back(Load.getValue(1)); 3814 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3815 3816 ArgOffset += PtrByteSize; 3817 continue; 3818 } 3819 } 3820 3821 if (GPR_idx == NumGPRs && Size < 8) { 3822 SDValue Const = DAG.getConstant(PtrByteSize - Size, 3823 PtrOff.getValueType()); 3824 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3825 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3826 CallSeqStart, 3827 Flags, DAG, dl); 3828 ArgOffset += PtrByteSize; 3829 continue; 3830 } 3831 // Copy entire object into memory. There are cases where gcc-generated 3832 // code assumes it is there, even if it could be put entirely into 3833 // registers. (This is not what the doc says.) 3834 3835 // FIXME: The above statement is likely due to a misunderstanding of the 3836 // documents. All arguments must be copied into the parameter area BY 3837 // THE CALLEE in the event that the callee takes the address of any 3838 // formal argument. That has not yet been implemented. However, it is 3839 // reasonable to use the stack area as a staging area for the register 3840 // load. 3841 3842 // Skip this for small aggregates, as we will use the same slot for a 3843 // right-justified copy, below. 3844 if (Size >= 8) 3845 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 3846 CallSeqStart, 3847 Flags, DAG, dl); 3848 3849 // When a register is available, pass a small aggregate right-justified. 3850 if (Size < 8 && GPR_idx != NumGPRs) { 3851 // The easiest way to get this right-justified in a register 3852 // is to copy the structure into the rightmost portion of a 3853 // local variable slot, then load the whole slot into the 3854 // register. 3855 // FIXME: The memcpy seems to produce pretty awful code for 3856 // small aggregates, particularly for packed ones. 3857 // FIXME: It would be preferable to use the slot in the 3858 // parameter save area instead of a new local variable. 3859 SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType()); 3860 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 3861 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 3862 CallSeqStart, 3863 Flags, DAG, dl); 3864 3865 // Load the slot into the register. 3866 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 3867 MachinePointerInfo(), 3868 false, false, false, 0); 3869 MemOpChains.push_back(Load.getValue(1)); 3870 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3871 3872 // Done with this argument. 3873 ArgOffset += PtrByteSize; 3874 continue; 3875 } 3876 3877 // For aggregates larger than PtrByteSize, copy the pieces of the 3878 // object that fit into registers from the parameter save area. 
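      // A hedged worked example of the loop below: for Size == 20 with three
      // GPRs still free, the doublewords at offsets 0, 8 and 16 of the source
      // aggregate are loaded into the next three GPRs and ArgOffset advances
      // by 24. Had the GPRs run out at j == 16, the remaining
      // ((20 - 16 + 7)/8)*8 == 8 bytes would be left in the parameter save
      // area copy and ArgOffset bumped past them instead.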
3879 for (unsigned j=0; j<Size; j+=PtrByteSize) { 3880 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 3881 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 3882 if (GPR_idx != NumGPRs) { 3883 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 3884 MachinePointerInfo(), 3885 false, false, false, 0); 3886 MemOpChains.push_back(Load.getValue(1)); 3887 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3888 ArgOffset += PtrByteSize; 3889 } else { 3890 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 3891 break; 3892 } 3893 } 3894 continue; 3895 } 3896 3897 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 3898 default: llvm_unreachable("Unexpected ValueType for argument!"); 3899 case MVT::i32: 3900 case MVT::i64: 3901 if (GPR_idx != NumGPRs) { 3902 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 3903 } else { 3904 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3905 true, isTailCall, false, MemOpChains, 3906 TailCallArguments, dl); 3907 } 3908 ArgOffset += PtrByteSize; 3909 break; 3910 case MVT::f32: 3911 case MVT::f64: 3912 if (FPR_idx != NumFPRs) { 3913 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 3914 3915 if (isVarArg) { 3916 // A single float or an aggregate containing only a single float 3917 // must be passed right-justified in the stack doubleword, and 3918 // in the GPR, if one is available. 3919 SDValue StoreOff; 3920 if (Arg.getValueType().getSimpleVT().SimpleTy == MVT::f32) { 3921 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3922 StoreOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3923 } else 3924 StoreOff = PtrOff; 3925 3926 SDValue Store = DAG.getStore(Chain, dl, Arg, StoreOff, 3927 MachinePointerInfo(), false, false, 0); 3928 MemOpChains.push_back(Store); 3929 3930 // Float varargs are always shadowed in available integer registers 3931 if (GPR_idx != NumGPRs) { 3932 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 3933 MachinePointerInfo(), false, false, 3934 false, 0); 3935 MemOpChains.push_back(Load.getValue(1)); 3936 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3937 } 3938 } else if (GPR_idx != NumGPRs) 3939 // If we have any FPRs remaining, we may also have GPRs remaining. 3940 ++GPR_idx; 3941 } else { 3942 // Single-precision floating-point values are mapped to the 3943 // second (rightmost) word of the stack doubleword. 3944 if (Arg.getValueType() == MVT::f32) { 3945 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 3946 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 3947 } 3948 3949 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 3950 true, isTailCall, false, MemOpChains, 3951 TailCallArguments, dl); 3952 } 3953 ArgOffset += 8; 3954 break; 3955 case MVT::v4f32: 3956 case MVT::v4i32: 3957 case MVT::v8i16: 3958 case MVT::v16i8: 3959 if (isVarArg) { 3960 // These go aligned on the stack, or in the corresponding R registers 3961 // when within range. The Darwin PPC ABI doc claims they also go in 3962 // V registers; in fact gcc does this only for arguments that are 3963 // prototyped, not for those that match the ... We do it for all 3964 // arguments, seems to work. 3965 while (ArgOffset % 16 !=0) { 3966 ArgOffset += PtrByteSize; 3967 if (GPR_idx != NumGPRs) 3968 GPR_idx++; 3969 } 3970 // We could elide this store in the case where the object fits 3971 // entirely in R registers. Maybe later. 
3972 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 3973 DAG.getConstant(ArgOffset, PtrVT)); 3974 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 3975 MachinePointerInfo(), false, false, 0); 3976 MemOpChains.push_back(Store); 3977 if (VR_idx != NumVRs) { 3978 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 3979 MachinePointerInfo(), 3980 false, false, false, 0); 3981 MemOpChains.push_back(Load.getValue(1)); 3982 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 3983 } 3984 ArgOffset += 16; 3985 for (unsigned i=0; i<16; i+=PtrByteSize) { 3986 if (GPR_idx == NumGPRs) 3987 break; 3988 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 3989 DAG.getConstant(i, PtrVT)); 3990 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 3991 false, false, false, 0); 3992 MemOpChains.push_back(Load.getValue(1)); 3993 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 3994 } 3995 break; 3996 } 3997 3998 // Non-varargs Altivec params generally go in registers, but have 3999 // stack space allocated at the end. 4000 if (VR_idx != NumVRs) { 4001 // Doesn't have GPR space allocated. 4002 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4003 } else { 4004 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4005 true, isTailCall, true, MemOpChains, 4006 TailCallArguments, dl); 4007 ArgOffset += 16; 4008 } 4009 break; 4010 } 4011 } 4012 4013 if (!MemOpChains.empty()) 4014 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4015 &MemOpChains[0], MemOpChains.size()); 4016 4017 // Check if this is an indirect call (MTCTR/BCTRL). 4018 // See PrepareCall() for more information about calls through function 4019 // pointers in the 64-bit SVR4 ABI. 4020 if (!isTailCall && 4021 !dyn_cast<GlobalAddressSDNode>(Callee) && 4022 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4023 !isBLACompatibleAddress(Callee, DAG)) { 4024 // Load r2 into a virtual register and store it to the TOC save area. 4025 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 4026 // TOC save area offset. 4027 SDValue PtrOff = DAG.getIntPtrConstant(40); 4028 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4029 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), 4030 false, false, 0); 4031 // R12 must contain the address of an indirect callee. This does not 4032 // mean the MTCTR instruction must use R12; it's easier to model this 4033 // as an extra parameter, so do that. 4034 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 4035 } 4036 4037 // Build a sequence of copy-to-reg nodes chained together with token chain 4038 // and flag operands which copy the outgoing args into the appropriate regs. 
4039  SDValue InFlag;
4040  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4041    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4042                             RegsToPass[i].second, InFlag);
4043    InFlag = Chain.getValue(1);
4044  }
4045
4046  if (isTailCall)
4047    PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
4048                    FPOp, true, TailCallArguments);
4049
4050  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
4051                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
4052                    Ins, InVals);
4053}
4054
4055SDValue
4056PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
4057                                    CallingConv::ID CallConv, bool isVarArg,
4058                                    bool isTailCall,
4059                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
4060                                    const SmallVectorImpl<SDValue> &OutVals,
4061                                    const SmallVectorImpl<ISD::InputArg> &Ins,
4062                                    SDLoc dl, SelectionDAG &DAG,
4063                                    SmallVectorImpl<SDValue> &InVals) const {
4064
4065  unsigned NumOps = Outs.size();
4066
4067  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4068  bool isPPC64 = PtrVT == MVT::i64;
4069  unsigned PtrByteSize = isPPC64 ? 8 : 4;
4070
4071  MachineFunction &MF = DAG.getMachineFunction();
4072
4073  // Mark this function as potentially containing a tail call. As a
4074  // consequence the frame pointer will be used for dynamic allocations and
4075  // for restoring the caller's stack pointer in this function's epilog. This
4076  // is done because the tail-called function might overwrite the value in
4077  // this function's (MF) stack pointer stack slot 0(SP).
4078  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
4079      CallConv == CallingConv::Fast)
4080    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
4081
4082  unsigned nAltivecParamsAtEnd = 0;
4083
4084  // Count how many bytes are to be pushed on the stack, including the linkage
4085  // area, and parameter passing area. We start with 24/48 bytes, which is
4086  // prereserved space for [SP][CR][LR][3 x unused].
4087  unsigned NumBytes =
4088    CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
4089                                         Outs, OutVals,
4090                                         nAltivecParamsAtEnd);
4091
4092  // Calculate by how many bytes the stack has to be adjusted in case of tail
4093  // call optimization.
4094  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
4095
4096  // To protect arguments on the stack from being clobbered in a tail call,
4097  // force all the loads to happen before doing any other lowering.
4098  if (isTailCall)
4099    Chain = DAG.getStackArgumentTokenFactor(Chain);
4100
4101  // Adjust the stack pointer for the new arguments...
4102  // These operations are automatically eliminated by the prolog/epilog pass
4103  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
4104                               dl);
4105  SDValue CallSeqStart = Chain;
4106
4107  // Load the return address and frame pointer so they can be moved somewhere
4108  // else later.
4109  SDValue LROp, FPOp;
4110  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
4111                                       dl);
4112
4113  // Set up a copy of the stack pointer for use loading and storing any
4114  // arguments that may not fit in the registers available for argument
4115  // passing.
4116  SDValue StackPtr;
4117  if (isPPC64)
4118    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4119  else
4120    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4121
4122  // Figure out which arguments are going to go in registers, and which in
4123  // memory.
Also, if this is a vararg function, floating point operations 4124 // must be stored to our stack, and loaded into integer regs as well, if 4125 // any integer regs are available for argument passing. 4126 unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true); 4127 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4128 4129 static const uint16_t GPR_32[] = { // 32-bit registers. 4130 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4131 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4132 }; 4133 static const uint16_t GPR_64[] = { // 64-bit registers. 4134 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4135 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4136 }; 4137 static const uint16_t *FPR = GetFPR(); 4138 4139 static const uint16_t VR[] = { 4140 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4141 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4142 }; 4143 const unsigned NumGPRs = array_lengthof(GPR_32); 4144 const unsigned NumFPRs = 13; 4145 const unsigned NumVRs = array_lengthof(VR); 4146 4147 const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32; 4148 4149 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4150 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4151 4152 SmallVector<SDValue, 8> MemOpChains; 4153 for (unsigned i = 0; i != NumOps; ++i) { 4154 SDValue Arg = OutVals[i]; 4155 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4156 4157 // PtrOff will be used to store the current argument to the stack if a 4158 // register cannot be found for it. 4159 SDValue PtrOff; 4160 4161 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 4162 4163 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4164 4165 // On PPC64, promote integers to 64-bit values. 4166 if (isPPC64 && Arg.getValueType() == MVT::i32) { 4167 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4168 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4169 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4170 } 4171 4172 // FIXME memcpy is used way more than necessary. Correctness first. 4173 // Note: "by value" is code for passing a structure by value, not 4174 // basic types. 4175 if (Flags.isByVal()) { 4176 unsigned Size = Flags.getByValSize(); 4177 // Very small objects are passed right-justified. Everything else is 4178 // passed left-justified. 4179 if (Size==1 || Size==2) { 4180 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 4181 if (GPR_idx != NumGPRs) { 4182 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4183 MachinePointerInfo(), VT, 4184 false, false, 0); 4185 MemOpChains.push_back(Load.getValue(1)); 4186 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4187 4188 ArgOffset += PtrByteSize; 4189 } else { 4190 SDValue Const = DAG.getConstant(PtrByteSize - Size, 4191 PtrOff.getValueType()); 4192 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4193 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 4194 CallSeqStart, 4195 Flags, DAG, dl); 4196 ArgOffset += PtrByteSize; 4197 } 4198 continue; 4199 } 4200 // Copy entire object into memory. There are cases where gcc-generated 4201 // code assumes it is there, even if it could be put entirely into 4202 // registers. (This is not what the doc says.) 4203 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 4204 CallSeqStart, 4205 Flags, DAG, dl); 4206 4207 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 4208 // copy the pieces of the object that fit into registers from the 4209 // parameter save area. 
4210 for (unsigned j=0; j<Size; j+=PtrByteSize) { 4211 SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); 4212 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 4213 if (GPR_idx != NumGPRs) { 4214 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 4215 MachinePointerInfo(), 4216 false, false, false, 0); 4217 MemOpChains.push_back(Load.getValue(1)); 4218 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4219 ArgOffset += PtrByteSize; 4220 } else { 4221 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 4222 break; 4223 } 4224 } 4225 continue; 4226 } 4227 4228 switch (Arg.getValueType().getSimpleVT().SimpleTy) { 4229 default: llvm_unreachable("Unexpected ValueType for argument!"); 4230 case MVT::i32: 4231 case MVT::i64: 4232 if (GPR_idx != NumGPRs) { 4233 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 4234 } else { 4235 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4236 isPPC64, isTailCall, false, MemOpChains, 4237 TailCallArguments, dl); 4238 } 4239 ArgOffset += PtrByteSize; 4240 break; 4241 case MVT::f32: 4242 case MVT::f64: 4243 if (FPR_idx != NumFPRs) { 4244 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 4245 4246 if (isVarArg) { 4247 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4248 MachinePointerInfo(), false, false, 0); 4249 MemOpChains.push_back(Store); 4250 4251 // Float varargs are always shadowed in available integer registers 4252 if (GPR_idx != NumGPRs) { 4253 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4254 MachinePointerInfo(), false, false, 4255 false, 0); 4256 MemOpChains.push_back(Load.getValue(1)); 4257 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4258 } 4259 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 4260 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); 4261 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 4262 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 4263 MachinePointerInfo(), 4264 false, false, false, 0); 4265 MemOpChains.push_back(Load.getValue(1)); 4266 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4267 } 4268 } else { 4269 // If we have any FPRs remaining, we may also have GPRs remaining. 4270 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 4271 // GPRs. 4272 if (GPR_idx != NumGPRs) 4273 ++GPR_idx; 4274 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 4275 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 4276 ++GPR_idx; 4277 } 4278 } else 4279 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4280 isPPC64, isTailCall, false, MemOpChains, 4281 TailCallArguments, dl); 4282 if (isPPC64) 4283 ArgOffset += 8; 4284 else 4285 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 4286 break; 4287 case MVT::v4f32: 4288 case MVT::v4i32: 4289 case MVT::v8i16: 4290 case MVT::v16i8: 4291 if (isVarArg) { 4292 // These go aligned on the stack, or in the corresponding R registers 4293 // when within range. The Darwin PPC ABI doc claims they also go in 4294 // V registers; in fact gcc does this only for arguments that are 4295 // prototyped, not for those that match the ... We do it for all 4296 // arguments, seems to work. 4297 while (ArgOffset % 16 !=0) { 4298 ArgOffset += PtrByteSize; 4299 if (GPR_idx != NumGPRs) 4300 GPR_idx++; 4301 } 4302 // We could elide this store in the case where the object fits 4303 // entirely in R registers. Maybe later. 
4304 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4305 DAG.getConstant(ArgOffset, PtrVT)); 4306 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 4307 MachinePointerInfo(), false, false, 0); 4308 MemOpChains.push_back(Store); 4309 if (VR_idx != NumVRs) { 4310 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 4311 MachinePointerInfo(), 4312 false, false, false, 0); 4313 MemOpChains.push_back(Load.getValue(1)); 4314 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 4315 } 4316 ArgOffset += 16; 4317 for (unsigned i=0; i<16; i+=PtrByteSize) { 4318 if (GPR_idx == NumGPRs) 4319 break; 4320 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 4321 DAG.getConstant(i, PtrVT)); 4322 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 4323 false, false, false, 0); 4324 MemOpChains.push_back(Load.getValue(1)); 4325 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4326 } 4327 break; 4328 } 4329 4330 // Non-varargs Altivec params generally go in registers, but have 4331 // stack space allocated at the end. 4332 if (VR_idx != NumVRs) { 4333 // Doesn't have GPR space allocated. 4334 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 4335 } else if (nAltivecParamsAtEnd==0) { 4336 // We are emitting Altivec params in order. 4337 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4338 isPPC64, isTailCall, true, MemOpChains, 4339 TailCallArguments, dl); 4340 ArgOffset += 16; 4341 } 4342 break; 4343 } 4344 } 4345 // If all Altivec parameters fit in registers, as they usually do, 4346 // they get stack space following the non-Altivec parameters. We 4347 // don't track this here because nobody below needs it. 4348 // If there are more Altivec parameters than fit in registers emit 4349 // the stores here. 4350 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 4351 unsigned j = 0; 4352 // Offset is aligned; skip 1st 12 params which go in V registers. 4353 ArgOffset = ((ArgOffset+15)/16)*16; 4354 ArgOffset += 12*16; 4355 for (unsigned i = 0; i != NumOps; ++i) { 4356 SDValue Arg = OutVals[i]; 4357 EVT ArgType = Outs[i].VT; 4358 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 4359 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 4360 if (++j > NumVRs) { 4361 SDValue PtrOff; 4362 // We are emitting Altivec params in order. 4363 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 4364 isPPC64, isTailCall, true, MemOpChains, 4365 TailCallArguments, dl); 4366 ArgOffset += 16; 4367 } 4368 } 4369 } 4370 } 4371 4372 if (!MemOpChains.empty()) 4373 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4374 &MemOpChains[0], MemOpChains.size()); 4375 4376 // On Darwin, R12 must contain the address of an indirect callee. This does 4377 // not mean the MTCTR instruction must use R12; it's easier to model this as 4378 // an extra parameter, so do that. 4379 if (!isTailCall && 4380 !dyn_cast<GlobalAddressSDNode>(Callee) && 4381 !dyn_cast<ExternalSymbolSDNode>(Callee) && 4382 !isBLACompatibleAddress(Callee, DAG)) 4383 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 4384 PPC::R12), Callee)); 4385 4386 // Build a sequence of copy-to-reg nodes chained together with token chain 4387 // and flag operands which copy the outgoing args into the appropriate regs. 
4388  SDValue InFlag;
4389  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4390    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4391                             RegsToPass[i].second, InFlag);
4392    InFlag = Chain.getValue(1);
4393  }
4394
4395  if (isTailCall)
4396    PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
4397                    FPOp, true, TailCallArguments);
4398
4399  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
4400                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
4401                    Ins, InVals);
4402}
4403
4404bool
4405PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
4406                                  MachineFunction &MF, bool isVarArg,
4407                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
4408                                  LLVMContext &Context) const {
4409  SmallVector<CCValAssign, 16> RVLocs;
4410  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
4411                 RVLocs, Context);
4412  return CCInfo.CheckReturn(Outs, RetCC_PPC);
4413}
4414
4415SDValue
4416PPCTargetLowering::LowerReturn(SDValue Chain,
4417                               CallingConv::ID CallConv, bool isVarArg,
4418                               const SmallVectorImpl<ISD::OutputArg> &Outs,
4419                               const SmallVectorImpl<SDValue> &OutVals,
4420                               SDLoc dl, SelectionDAG &DAG) const {
4421
4422  SmallVector<CCValAssign, 16> RVLocs;
4423  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4424                 getTargetMachine(), RVLocs, *DAG.getContext());
4425  CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
4426
4427  SDValue Flag;
4428  SmallVector<SDValue, 4> RetOps(1, Chain);
4429
4430  // Copy the result values into the output registers.
4431  for (unsigned i = 0; i != RVLocs.size(); ++i) {
4432    CCValAssign &VA = RVLocs[i];
4433    assert(VA.isRegLoc() && "Can only return in registers!");
4434
4435    SDValue Arg = OutVals[i];
4436
4437    switch (VA.getLocInfo()) {
4438    default: llvm_unreachable("Unknown loc info!");
4439    case CCValAssign::Full: break;
4440    case CCValAssign::AExt:
4441      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
4442      break;
4443    case CCValAssign::ZExt:
4444      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
4445      break;
4446    case CCValAssign::SExt:
4447      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
4448      break;
4449    }
4450
4451    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
4452    Flag = Chain.getValue(1);
4453    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
4454  }
4455
4456  RetOps[0] = Chain;  // Update chain.
4457
4458  // Add the flag if we have it.
4459  if (Flag.getNode())
4460    RetOps.push_back(Flag);
4461
4462  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other,
4463                     &RetOps[0], RetOps.size());
4464}
4465
4466SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
4467                                  const PPCSubtarget &Subtarget) const {
4468  // When we pop the dynamic allocation we need to restore the SP link.
4469  SDLoc dl(Op);
4470
4471  // Get the correct type for pointers.
4472  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4473
4474  // Construct the stack pointer operand.
4475  bool isPPC64 = Subtarget.isPPC64();
4476  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
4477  SDValue StackPtr = DAG.getRegister(SP, PtrVT);
4478
4479  // Get the operands for the STACKRESTORE.
4480  SDValue Chain = Op.getOperand(0);
4481  SDValue SaveSP = Op.getOperand(1);
4482
4483  // Load the old link SP.
4484  SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
4485                                   MachinePointerInfo(),
4486                                   false, false, false, 0);
4487
4488  // Restore the stack pointer.
4489  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
4490
4491  // Store the old link SP.
4492 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
4493 false, false, 0);
4494 }
4495
4496
4497
4498 SDValue
4499 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
4500 MachineFunction &MF = DAG.getMachineFunction();
4501 bool isPPC64 = PPCSubTarget.isPPC64();
4502 bool isDarwinABI = PPCSubTarget.isDarwinABI();
4503 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4504
4505 // Get the current return address save index. The users of this index will
4506 // be primarily the RETURNADDR lowering.
4507 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
4508 int RASI = FI->getReturnAddrSaveIndex();
4509
4510 // If the return address save index hasn't been defined yet, create it.
4511 if (!RASI) {
4512 // Find the fixed offset of the return address save area.
4513 int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
4514 // Allocate the frame index for the return address save area.
4515 RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64 ? 8 : 4, LROffset, true);
4516 // Save the result.
4517 FI->setReturnAddrSaveIndex(RASI);
4518 }
4519 return DAG.getFrameIndex(RASI, PtrVT);
4520 }
4521
4522 SDValue
4523 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
4524 MachineFunction &MF = DAG.getMachineFunction();
4525 bool isPPC64 = PPCSubTarget.isPPC64();
4526 bool isDarwinABI = PPCSubTarget.isDarwinABI();
4527 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4528
4529 // Get the current frame pointer save index. The users of this index will be
4530 // primarily DYNALLOC instructions.
4531 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
4532 int FPSI = FI->getFramePointerSaveIndex();
4533
4534 // If the frame pointer save index hasn't been defined yet, create it.
4535 if (!FPSI) {
4536 // Find the fixed offset of the frame pointer save area.
4537 int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
4538 isDarwinABI);
4539
4540 // Allocate the frame index for the frame pointer save area.
4541 FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64 ? 8 : 4, FPOffset, true);
4542 // Save the result.
4543 FI->setFramePointerSaveIndex(FPSI);
4544 }
4545 return DAG.getFrameIndex(FPSI, PtrVT);
4546 }
4547
4548 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
4549 SelectionDAG &DAG,
4550 const PPCSubtarget &Subtarget) const {
4551 // Get the inputs.
4552 SDValue Chain = Op.getOperand(0);
4553 SDValue Size = Op.getOperand(1);
4554 SDLoc dl(Op);
4555
4556 // Get the correct type for pointers.
4557 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4558 // Negate the size.
4559 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
4560 DAG.getConstant(0, PtrVT), Size);
4561 // Construct a node for the frame pointer save index.
4562 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
4563 // Build a DYNALLOC node.
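// DYNALLOC carries the chain, the negated allocation size, and the frame
// index of the frame-pointer save slot; it is later expanded, roughly, into
// a stwux/stdux of SP that bumps the stack pointer and rewrites the
// back-chain word in one step, yielding the new SP as the allocated address.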
4564 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
4565 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
4566 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
4567 }
4568
4569 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
4570 SelectionDAG &DAG) const {
4571 SDLoc DL(Op);
4572 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
4573 DAG.getVTList(MVT::i32, MVT::Other),
4574 Op.getOperand(0), Op.getOperand(1));
4575 }
4576
4577 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
4578 SelectionDAG &DAG) const {
4579 SDLoc DL(Op);
4580 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
4581 Op.getOperand(0), Op.getOperand(1));
4582 }
4583
4584 /// LowerSELECT_CC - Lower floating-point select_cc's into the fsel instruction
4585 /// when possible.
4586 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
4587 // Not FP? Not an fsel.
4588 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
4589 !Op.getOperand(2).getValueType().isFloatingPoint())
4590 return Op;
4591
4592 // We might be able to do better than this under some circumstances, but in
4593 // general, fsel-based lowering of select is a finite-math-only optimization.
4594 // For more information, see section F.3 of the 2.06 ISA specification.
4595 if (!DAG.getTarget().Options.NoInfsFPMath ||
4596 !DAG.getTarget().Options.NoNaNsFPMath)
4597 return Op;
4598
4599 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4600
4601 EVT ResVT = Op.getValueType();
4602 EVT CmpVT = Op.getOperand(0).getValueType();
4603 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
4604 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
4605 SDLoc dl(Op);
4606
4607 // If the RHS of the comparison is a 0.0, we don't need to do the
4608 // subtraction at all.
4609 SDValue Sel1;
4610 if (isFloatingPointZero(RHS))
4611 switch (CC) {
4612 default: break; // SETUO etc aren't handled by fsel.
4613 case ISD::SETNE:
4614 std::swap(TV, FV);
4615 case ISD::SETEQ:
4616 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
4617 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
4618 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
4619 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
4620 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
4621 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
4622 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
4623 case ISD::SETULT:
4624 case ISD::SETLT:
4625 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
4626 case ISD::SETOGE:
4627 case ISD::SETGE:
4628 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
4629 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
4630 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
4631 case ISD::SETUGT:
4632 case ISD::SETGT:
4633 std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
4634 case ISD::SETOLE:
4635 case ISD::SETLE:
4636 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
4637 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
4638 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
4639 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
4640 }
4641
4642 SDValue Cmp;
4643 switch (CC) {
4644 default: break; // SETUO etc aren't handled by fsel.
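// The cases below materialize Cmp = LHS - RHS (or RHS - LHS) and exploit
// fsel's "select on operand >= 0.0" semantics. E.g. for SETLT:
//   select_cc lhs, rhs, tv, fv, setlt -> fsel (fsub lhs, rhs), fv, tv
// which is only valid because NaNs and infinities were excluded above.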
4645 case ISD::SETNE: 4646 std::swap(TV, FV); 4647 case ISD::SETEQ: 4648 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4649 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4650 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4651 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4652 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits 4653 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1); 4654 return DAG.getNode(PPCISD::FSEL, dl, ResVT, 4655 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV); 4656 case ISD::SETULT: 4657 case ISD::SETLT: 4658 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4659 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4660 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4661 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4662 case ISD::SETOGE: 4663 case ISD::SETGE: 4664 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS); 4665 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4666 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4667 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4668 case ISD::SETUGT: 4669 case ISD::SETGT: 4670 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4671 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4672 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4673 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV); 4674 case ISD::SETOLE: 4675 case ISD::SETLE: 4676 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS); 4677 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits 4678 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp); 4679 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV); 4680 } 4681 return Op; 4682} 4683 4684// FIXME: Split this code up when LegalizeDAGTypes lands. 4685SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 4686 SDLoc dl) const { 4687 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 4688 SDValue Src = Op.getOperand(0); 4689 4690 // If we have a long double here, it must be that we have an undef of 4691 // that type. In this case return an undef of the target type. 4692 if (Src.getValueType() == MVT::ppcf128) { 4693 assert(Src.getOpcode() == ISD::UNDEF && "Unhandled ppcf128!"); 4694 return DAG.getNode(ISD::UNDEF, dl, 4695 Op.getValueType().getSimpleVT().SimpleTy); 4696 } 4697 4698 if (Src.getValueType() == MVT::f32) 4699 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 4700 4701 SDValue Tmp; 4702 switch (Op.getValueType().getSimpleVT().SimpleTy) { 4703 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 4704 case MVT::i32: 4705 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : 4706 (PPCSubTarget.hasFPCVT() ? PPCISD::FCTIWUZ : 4707 PPCISD::FCTIDZ), 4708 dl, MVT::f64, Src); 4709 break; 4710 case MVT::i64: 4711 assert((Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()) && 4712 "i64 FP_TO_UINT is supported only with FPCVT"); 4713 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 4714 PPCISD::FCTIDUZ, 4715 dl, MVT::f64, Src); 4716 break; 4717 } 4718 4719 // Convert the FP value to an int value through memory. 4720 bool i32Stack = Op.getValueType() == MVT::i32 && PPCSubTarget.hasSTFIWX() && 4721 (Op.getOpcode() == ISD::FP_TO_SINT || PPCSubTarget.hasFPCVT()); 4722 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? 
MVT::i32 : MVT::f64); 4723 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 4724 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(FI); 4725 4726 // Emit a store to the stack slot. 4727 SDValue Chain; 4728 if (i32Stack) { 4729 MachineFunction &MF = DAG.getMachineFunction(); 4730 MachineMemOperand *MMO = 4731 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4); 4732 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 4733 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 4734 DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops), 4735 MVT::i32, MMO); 4736 } else 4737 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, 4738 MPI, false, false, 0); 4739 4740 // Result is a load from the stack slot. If loading 4 bytes, make sure to 4741 // add in a bias. 4742 if (Op.getValueType() == MVT::i32 && !i32Stack) { 4743 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 4744 DAG.getConstant(4, FIPtr.getValueType())); 4745 MPI = MachinePointerInfo(); 4746 } 4747 4748 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MPI, 4749 false, false, false, 0); 4750} 4751 4752SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 4753 SelectionDAG &DAG) const { 4754 SDLoc dl(Op); 4755 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 4756 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 4757 return SDValue(); 4758 4759 assert((Op.getOpcode() == ISD::SINT_TO_FP || PPCSubTarget.hasFPCVT()) && 4760 "UINT_TO_FP is supported only with FPCVT"); 4761 4762 // If we have FCFIDS, then use it when converting to single-precision. 4763 // Otherwise, convert to double-precision and then round. 4764 unsigned FCFOp = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 4765 (Op.getOpcode() == ISD::UINT_TO_FP ? 4766 PPCISD::FCFIDUS : PPCISD::FCFIDS) : 4767 (Op.getOpcode() == ISD::UINT_TO_FP ? 4768 PPCISD::FCFIDU : PPCISD::FCFID); 4769 MVT FCFTy = (PPCSubTarget.hasFPCVT() && Op.getValueType() == MVT::f32) ? 4770 MVT::f32 : MVT::f64; 4771 4772 if (Op.getOperand(0).getValueType() == MVT::i64) { 4773 SDValue SINT = Op.getOperand(0); 4774 // When converting to single-precision, we actually need to convert 4775 // to double-precision first and then round to single-precision. 4776 // To avoid double-rounding effects during that operation, we have 4777 // to prepare the input operand. Bits that might be truncated when 4778 // converting to double-precision are replaced by a bit that won't 4779 // be lost at this stage, but is below the single-precision rounding 4780 // position. 4781 // 4782 // However, if -enable-unsafe-fp-math is in effect, accept double 4783 // rounding to avoid the extra overhead. 4784 if (Op.getValueType() == MVT::f32 && 4785 !PPCSubTarget.hasFPCVT() && 4786 !DAG.getTarget().Options.UnsafeFPMath) { 4787 4788 // Twiddle input to make sure the low 11 bits are zero. (If this 4789 // is the case, we are guaranteed the value will fit into the 53 bit 4790 // mantissa of an IEEE double-precision value without rounding.) 4791 // If any of those low 11 bits were not zero originally, make sure 4792 // bit 12 (value 2048) is set instead, so that the final rounding 4793 // to single-precision gets the correct result. 
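// Worked example: SINT = (1 << 53) + 5. The low 11 bits (5) would be
// rounded away by the i64->f64 conversion, so the code below computes
//   Round = SINT & 2047          = 5
//   Round = Round + 2047         = 2052  (the bit with value 2048 is set)
//   Round = (Round | SINT) & ~2047
// giving (1 << 53) + 2048, which is exactly representable as an f64 and
// rounds to single-precision the same way the original value does.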
4794 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4795 SINT, DAG.getConstant(2047, MVT::i64)); 4796 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 4797 Round, DAG.getConstant(2047, MVT::i64)); 4798 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 4799 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 4800 Round, DAG.getConstant(-2048, MVT::i64)); 4801 4802 // However, we cannot use that value unconditionally: if the magnitude 4803 // of the input value is small, the bit-twiddling we did above might 4804 // end up visibly changing the output. Fortunately, in that case, we 4805 // don't need to twiddle bits since the original input will convert 4806 // exactly to double-precision floating-point already. Therefore, 4807 // construct a conditional to use the original value if the top 11 4808 // bits are all sign-bit copies, and use the rounded value computed 4809 // above otherwise. 4810 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 4811 SINT, DAG.getConstant(53, MVT::i32)); 4812 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 4813 Cond, DAG.getConstant(1, MVT::i64)); 4814 Cond = DAG.getSetCC(dl, MVT::i32, 4815 Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT); 4816 4817 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 4818 } 4819 4820 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 4821 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 4822 4823 if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT()) 4824 FP = DAG.getNode(ISD::FP_ROUND, dl, 4825 MVT::f32, FP, DAG.getIntPtrConstant(0)); 4826 return FP; 4827 } 4828 4829 assert(Op.getOperand(0).getValueType() == MVT::i32 && 4830 "Unhandled INT_TO_FP type in custom expander!"); 4831 // Since we only generate this in 64-bit mode, we can take advantage of 4832 // 64-bit registers. In particular, sign extend the input value into the 4833 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 4834 // then lfd it and fcfid it. 4835 MachineFunction &MF = DAG.getMachineFunction(); 4836 MachineFrameInfo *FrameInfo = MF.getFrameInfo(); 4837 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 4838 4839 SDValue Ld; 4840 if (PPCSubTarget.hasLFIWAX() || PPCSubTarget.hasFPCVT()) { 4841 int FrameIdx = FrameInfo->CreateStackObject(4, 4, false); 4842 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4843 4844 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 4845 MachinePointerInfo::getFixedStack(FrameIdx), 4846 false, false, 0); 4847 4848 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 4849 "Expected an i32 store"); 4850 MachineMemOperand *MMO = 4851 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), 4852 MachineMemOperand::MOLoad, 4, 4); 4853 SDValue Ops[] = { Store, FIdx }; 4854 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 4855 PPCISD::LFIWZX : PPCISD::LFIWAX, 4856 dl, DAG.getVTList(MVT::f64, MVT::Other), 4857 Ops, 2, MVT::i32, MMO); 4858 } else { 4859 assert(PPCSubTarget.isPPC64() && 4860 "i32->FP without LFIWAX supported only on PPC64"); 4861 4862 int FrameIdx = FrameInfo->CreateStackObject(8, 8, false); 4863 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 4864 4865 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, 4866 Op.getOperand(0)); 4867 4868 // STD the extended value into the stack slot. 4869 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Ext64, FIdx, 4870 MachinePointerInfo::getFixedStack(FrameIdx), 4871 false, false, 0); 4872 4873 // Load the value as a double. 
4874 Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx,
4875 MachinePointerInfo::getFixedStack(FrameIdx),
4876 false, false, false, 0);
4877 }
4878
4879 // FCFID it and return it.
4880 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
4881 if (Op.getValueType() == MVT::f32 && !PPCSubTarget.hasFPCVT())
4882 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
4883 return FP;
4884 }
4885
4886 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4887 SelectionDAG &DAG) const {
4888 SDLoc dl(Op);
4889 /*
4890 The rounding mode is in bits 30:31 of the FPSCR, and has the following
4891 settings:
4892 00 Round to nearest
4893 01 Round to 0
4894 10 Round to +inf
4895 11 Round to -inf
4896
4897 FLT_ROUNDS, on the other hand, expects the following:
4898 -1 Undefined
4899 0 Round to 0
4900 1 Round to nearest
4901 2 Round to +inf
4902 3 Round to -inf
4903
4904 To perform the conversion, we do:
4905 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
4906 */
4907
4908 MachineFunction &MF = DAG.getMachineFunction();
4909 EVT VT = Op.getValueType();
4910 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4911 SDValue MFFSreg, InFlag;
4912
4913 // Save FP Control Word to register
4914 EVT NodeTys[] = {
4915 MVT::f64, // return register
4916 MVT::Glue // unused in this context
4917 };
4918 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
4919
4920 // Save FP register to stack slot
4921 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
4922 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
4923 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
4924 StackSlot, MachinePointerInfo(), false, false, 0);
4925
4926 // Load FP Control Word from low 32 bits of stack slot.
4927 SDValue Four = DAG.getConstant(4, PtrVT);
4928 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
4929 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
4930 false, false, false, 0);
4931
4932 // Transform as necessary
4933 SDValue CWD1 =
4934 DAG.getNode(ISD::AND, dl, MVT::i32,
4935 CWD, DAG.getConstant(3, MVT::i32));
4936 SDValue CWD2 =
4937 DAG.getNode(ISD::SRL, dl, MVT::i32,
4938 DAG.getNode(ISD::AND, dl, MVT::i32,
4939 DAG.getNode(ISD::XOR, dl, MVT::i32,
4940 CWD, DAG.getConstant(3, MVT::i32)),
4941 DAG.getConstant(3, MVT::i32)),
4942 DAG.getConstant(1, MVT::i32));
4943
4944 SDValue RetVal =
4945 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
4946
4947 return DAG.getNode((VT.getSizeInBits() < 16 ?
4948 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
4949 }
4950
4951 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
4952 EVT VT = Op.getValueType();
4953 unsigned BitWidth = VT.getSizeInBits();
4954 SDLoc dl(Op);
4955 assert(Op.getNumOperands() == 3 &&
4956 VT == Op.getOperand(1).getValueType() &&
4957 "Unexpected SHL!");
4958
4959 // Expand into a bunch of logical ops. Note that these ops
4960 // depend on the PPC behavior for oversized shift amounts.
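// Specifically, slw/srw do not reduce the amount modulo 32; they produce 0
// for shift amounts in [32, 63]. E.g. with BitWidth = 32 and Amt = 40:
// Tmp2 and Tmp3 are 0, Tmp5 = 8, so OutHi = Lo << 8 and OutLo = Lo << 40 = 0,
// exactly the expected 64-bit shift-left result.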
4961 SDValue Lo = Op.getOperand(0); 4962 SDValue Hi = Op.getOperand(1); 4963 SDValue Amt = Op.getOperand(2); 4964 EVT AmtVT = Amt.getValueType(); 4965 4966 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 4967 DAG.getConstant(BitWidth, AmtVT), Amt); 4968 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 4969 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 4970 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 4971 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 4972 DAG.getConstant(-BitWidth, AmtVT)); 4973 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 4974 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 4975 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 4976 SDValue OutOps[] = { OutLo, OutHi }; 4977 return DAG.getMergeValues(OutOps, 2, dl); 4978} 4979 4980SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 4981 EVT VT = Op.getValueType(); 4982 SDLoc dl(Op); 4983 unsigned BitWidth = VT.getSizeInBits(); 4984 assert(Op.getNumOperands() == 3 && 4985 VT == Op.getOperand(1).getValueType() && 4986 "Unexpected SRL!"); 4987 4988 // Expand into a bunch of logical ops. Note that these ops 4989 // depend on the PPC behavior for oversized shift amounts. 4990 SDValue Lo = Op.getOperand(0); 4991 SDValue Hi = Op.getOperand(1); 4992 SDValue Amt = Op.getOperand(2); 4993 EVT AmtVT = Amt.getValueType(); 4994 4995 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 4996 DAG.getConstant(BitWidth, AmtVT), Amt); 4997 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 4998 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 4999 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5000 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5001 DAG.getConstant(-BitWidth, AmtVT)); 5002 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 5003 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 5004 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 5005 SDValue OutOps[] = { OutLo, OutHi }; 5006 return DAG.getMergeValues(OutOps, 2, dl); 5007} 5008 5009SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 5010 SDLoc dl(Op); 5011 EVT VT = Op.getValueType(); 5012 unsigned BitWidth = VT.getSizeInBits(); 5013 assert(Op.getNumOperands() == 3 && 5014 VT == Op.getOperand(1).getValueType() && 5015 "Unexpected SRA!"); 5016 5017 // Expand into a bunch of logical ops, followed by a select_cc. 5018 SDValue Lo = Op.getOperand(0); 5019 SDValue Hi = Op.getOperand(1); 5020 SDValue Amt = Op.getOperand(2); 5021 EVT AmtVT = Amt.getValueType(); 5022 5023 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 5024 DAG.getConstant(BitWidth, AmtVT), Amt); 5025 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 5026 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 5027 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 5028 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 5029 DAG.getConstant(-BitWidth, AmtVT)); 5030 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 5031 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 5032 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT), 5033 Tmp4, Tmp6, ISD::SETLE); 5034 SDValue OutOps[] = { OutLo, OutHi }; 5035 return DAG.getMergeValues(OutOps, 2, dl); 5036} 5037 5038//===----------------------------------------------------------------------===// 5039// Vector related lowering. 5040// 5041 5042/// BuildSplatI - Build a canonical splati of Val with an element size of 5043/// SplatSize. 
Cast the result to VT. 5044static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 5045 SelectionDAG &DAG, SDLoc dl) { 5046 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); 5047 5048 static const EVT VTys[] = { // canonical VT to use for each size. 5049 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 5050 }; 5051 5052 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 5053 5054 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 5055 if (Val == -1) 5056 SplatSize = 1; 5057 5058 EVT CanonicalVT = VTys[SplatSize-1]; 5059 5060 // Build a canonical splat for this value. 5061 SDValue Elt = DAG.getConstant(Val, MVT::i32); 5062 SmallVector<SDValue, 8> Ops; 5063 Ops.assign(CanonicalVT.getVectorNumElements(), Elt); 5064 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, 5065 &Ops[0], Ops.size()); 5066 return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res); 5067} 5068 5069/// BuildIntrinsicOp - Return a unary operator intrinsic node with the 5070/// specified intrinsic ID. 5071static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, 5072 SelectionDAG &DAG, SDLoc dl, 5073 EVT DestVT = MVT::Other) { 5074 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 5075 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5076 DAG.getConstant(IID, MVT::i32), Op); 5077} 5078 5079/// BuildIntrinsicOp - Return a binary operator intrinsic node with the 5080/// specified intrinsic ID. 5081static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 5082 SelectionDAG &DAG, SDLoc dl, 5083 EVT DestVT = MVT::Other) { 5084 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 5085 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5086 DAG.getConstant(IID, MVT::i32), LHS, RHS); 5087} 5088 5089/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 5090/// specified intrinsic ID. 5091static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 5092 SDValue Op2, SelectionDAG &DAG, 5093 SDLoc dl, EVT DestVT = MVT::Other) { 5094 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 5095 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 5096 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2); 5097} 5098 5099 5100/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 5101/// amount. The result has the specified value type. 5102static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, 5103 EVT VT, SelectionDAG &DAG, SDLoc dl) { 5104 // Force LHS/RHS to be the right type. 5105 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 5106 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 5107 5108 int Ops[16]; 5109 for (unsigned i = 0; i != 16; ++i) 5110 Ops[i] = i + Amt; 5111 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 5112 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5113} 5114 5115// If this is a case we can't handle, return null and let the default 5116// expansion code take care of it. If we CAN select this case, and if it 5117// selects to a single instruction, return Op. Otherwise, if we can codegen 5118// this case more efficiently than a constant pool load, lower it to the 5119// sequence of ops that should be used. 5120SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 5121 SelectionDAG &DAG) const { 5122 SDLoc dl(Op); 5123 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 5124 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 5125 5126 // Check if this is a splat of a constant value. 
5127 APInt APSplatBits, APSplatUndef;
5128 unsigned SplatBitSize;
5129 bool HasAnyUndefs;
5130 if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
5131 HasAnyUndefs, 0, true) || SplatBitSize > 32)
5132 return SDValue();
5133
5134 unsigned SplatBits = APSplatBits.getZExtValue();
5135 unsigned SplatUndef = APSplatUndef.getZExtValue();
5136 unsigned SplatSize = SplatBitSize / 8;
5137
5138 // First, handle single instruction cases.
5139
5140 // All zeros?
5141 if (SplatBits == 0) {
5142 // Canonicalize all zero vectors to be v4i32.
5143 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
5144 SDValue Z = DAG.getConstant(0, MVT::i32);
5145 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
5146 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
5147 }
5148 return Op;
5149 }
5150
5151 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
5152 int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
5153 (32-SplatBitSize));
5154 if (SextVal >= -16 && SextVal <= 15)
5155 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
5156
5157
5158 // Two instruction sequences.
5159
5160 // If this value is in the range [-32,30] and is even, use:
5161 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
5162 // If this value is in the range [17,31] and is odd, use:
5163 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
5164 // If this value is in the range [-31,-17] and is odd, use:
5165 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
5166 // Note the last two are three-instruction sequences.
5167 if (SextVal >= -32 && SextVal <= 31) {
5168 // To avoid having these optimizations undone by constant folding,
5169 // we convert to a pseudo that will be expanded later into one of
5170 // the above forms.
5171 SDValue Elt = DAG.getConstant(SextVal, MVT::i32);
5172 EVT VT = Op.getValueType();
5173 int Size = VT == MVT::v16i8 ? 1 : (VT == MVT::v8i16 ? 2 : 4);
5174 SDValue EltSize = DAG.getConstant(Size, MVT::i32);
5175 return DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
5176 }
5177
5178 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
5179 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
5180 // for fneg/fabs.
5181 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
5182 // Make -1 and vspltisw -1:
5183 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
5184
5185 // Make the VSLW intrinsic, computing 0x8000_0000.
5186 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
5187 OnesV, DAG, dl);
5188
5189 // xor by OnesV to invert it.
5190 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
5191 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
5192 }
5193
5194 // Check to see if this is one of a wide variety of 'vsplti + binop self'
5195 // cases.
5195 static const signed char SplatCsts[] = {
5196 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
5197 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
5198 };
5199
5200 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
5201 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
5202 // cases which are ambiguous (e.g. formation of 0x8000_0000); 'vsplti -1' is canonicalized to vspltisb -1 regardless of element size.
5203 int i = SplatCsts[idx];
5204
5205 // Figure out what shift amount will be used by altivec if shifted by i in
5206 // this splat size.
5207 unsigned TypeShiftAmt = i & (SplatBitSize-1);
5208
5209 // vsplti + shl self.
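// E.g. a v8i16 splat of 160 (0x00A0) is not directly encodable, but with
// i = 5 we get TypeShiftAmt = 5 and 5 << 5 == 160, so "vspltish 5" followed
// by a vslh of the result by itself builds the desired vector.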
5210 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
5211 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
5212 static const unsigned IIDs[] = { // Intrinsic to use for each size.
5213 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
5214 Intrinsic::ppc_altivec_vslw
5215 };
5216 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
5217 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
5218 }
5219
5220 // vsplti + srl self.
5221 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
5222 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
5223 static const unsigned IIDs[] = { // Intrinsic to use for each size.
5224 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
5225 Intrinsic::ppc_altivec_vsrw
5226 };
5227 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
5228 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
5229 }
5230
5231 // vsplti + sra self.
5232 if (SextVal == ((int)i >> TypeShiftAmt)) { // arithmetic, not logical, shift
5233 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
5234 static const unsigned IIDs[] = { // Intrinsic to use for each size.
5235 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
5236 Intrinsic::ppc_altivec_vsraw
5237 };
5238 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
5239 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
5240 }
5241
5242 // vsplti + rol self.
5243 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
5244 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
5245 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
5246 static const unsigned IIDs[] = { // Intrinsic to use for each size.
5247 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
5248 Intrinsic::ppc_altivec_vrlw
5249 };
5250 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
5251 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
5252 }
5253
5254 // t = vsplti c, result = vsldoi t, t, 1
5255 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
5256 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
5257 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
5258 }
5259 // t = vsplti c, result = vsldoi t, t, 2
5260 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
5261 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
5262 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
5263 }
5264 // t = vsplti c, result = vsldoi t, t, 3
5265 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
5266 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
5267 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
5268 }
5269 }
5270
5271 return SDValue();
5272 }
5273
5274 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
5275 /// the specified operations to build the shuffle.
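/// Each 32-bit entry packs a cost in bits 31:30, an opcode (one of the enum
/// values below) in bits 29:26, and 13-bit LHS/RHS operand IDs in bits 25:13
/// and 12:0. An ID encodes four source elements as base-9 digits (0-7 pick
/// an input element, 8 means undef), so <0,1,2,3> is (1*9+2)*9+3 and
/// <4,5,6,7> is ((4*9+5)*9+6)*9+7, matching the OP_COPY checks below.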
5276static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5277 SDValue RHS, SelectionDAG &DAG, 5278 SDLoc dl) { 5279 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5280 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5281 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5282 5283 enum { 5284 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 5285 OP_VMRGHW, 5286 OP_VMRGLW, 5287 OP_VSPLTISW0, 5288 OP_VSPLTISW1, 5289 OP_VSPLTISW2, 5290 OP_VSPLTISW3, 5291 OP_VSLDOI4, 5292 OP_VSLDOI8, 5293 OP_VSLDOI12 5294 }; 5295 5296 if (OpNum == OP_COPY) { 5297 if (LHSID == (1*9+2)*9+3) return LHS; 5298 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 5299 return RHS; 5300 } 5301 5302 SDValue OpLHS, OpRHS; 5303 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 5304 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 5305 5306 int ShufIdxs[16]; 5307 switch (OpNum) { 5308 default: llvm_unreachable("Unknown i32 permute!"); 5309 case OP_VMRGHW: 5310 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 5311 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 5312 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 5313 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 5314 break; 5315 case OP_VMRGLW: 5316 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 5317 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 5318 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 5319 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 5320 break; 5321 case OP_VSPLTISW0: 5322 for (unsigned i = 0; i != 16; ++i) 5323 ShufIdxs[i] = (i&3)+0; 5324 break; 5325 case OP_VSPLTISW1: 5326 for (unsigned i = 0; i != 16; ++i) 5327 ShufIdxs[i] = (i&3)+4; 5328 break; 5329 case OP_VSPLTISW2: 5330 for (unsigned i = 0; i != 16; ++i) 5331 ShufIdxs[i] = (i&3)+8; 5332 break; 5333 case OP_VSPLTISW3: 5334 for (unsigned i = 0; i != 16; ++i) 5335 ShufIdxs[i] = (i&3)+12; 5336 break; 5337 case OP_VSLDOI4: 5338 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl); 5339 case OP_VSLDOI8: 5340 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl); 5341 case OP_VSLDOI12: 5342 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); 5343 } 5344 EVT VT = OpLHS.getValueType(); 5345 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS); 5346 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS); 5347 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); 5348 return DAG.getNode(ISD::BITCAST, dl, VT, T); 5349} 5350 5351/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this 5352/// is a shuffle we can handle in a single instruction, return it. Otherwise, 5353/// return the code it can be lowered into. Worst case, it can always be 5354/// lowered into a vperm. 5355SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 5356 SelectionDAG &DAG) const { 5357 SDLoc dl(Op); 5358 SDValue V1 = Op.getOperand(0); 5359 SDValue V2 = Op.getOperand(1); 5360 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5361 EVT VT = Op.getValueType(); 5362 5363 // Cases that are handled by instructions that take permute immediates 5364 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 5365 // selected by the instruction selector. 
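// For example, a v4i32 shuffle of <0,0,0,0> with an undef V2 is left alone
// here and is later matched to a single "vspltw 0".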
5366 if (V2.getOpcode() == ISD::UNDEF) { 5367 if (PPC::isSplatShuffleMask(SVOp, 1) || 5368 PPC::isSplatShuffleMask(SVOp, 2) || 5369 PPC::isSplatShuffleMask(SVOp, 4) || 5370 PPC::isVPKUWUMShuffleMask(SVOp, true) || 5371 PPC::isVPKUHUMShuffleMask(SVOp, true) || 5372 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 || 5373 PPC::isVMRGLShuffleMask(SVOp, 1, true) || 5374 PPC::isVMRGLShuffleMask(SVOp, 2, true) || 5375 PPC::isVMRGLShuffleMask(SVOp, 4, true) || 5376 PPC::isVMRGHShuffleMask(SVOp, 1, true) || 5377 PPC::isVMRGHShuffleMask(SVOp, 2, true) || 5378 PPC::isVMRGHShuffleMask(SVOp, 4, true)) { 5379 return Op; 5380 } 5381 } 5382 5383 // Altivec has a variety of "shuffle immediates" that take two vector inputs 5384 // and produce a fixed permutation. If any of these match, do not lower to 5385 // VPERM. 5386 if (PPC::isVPKUWUMShuffleMask(SVOp, false) || 5387 PPC::isVPKUHUMShuffleMask(SVOp, false) || 5388 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 || 5389 PPC::isVMRGLShuffleMask(SVOp, 1, false) || 5390 PPC::isVMRGLShuffleMask(SVOp, 2, false) || 5391 PPC::isVMRGLShuffleMask(SVOp, 4, false) || 5392 PPC::isVMRGHShuffleMask(SVOp, 1, false) || 5393 PPC::isVMRGHShuffleMask(SVOp, 2, false) || 5394 PPC::isVMRGHShuffleMask(SVOp, 4, false)) 5395 return Op; 5396 5397 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 5398 // perfect shuffle table to emit an optimal matching sequence. 5399 ArrayRef<int> PermMask = SVOp->getMask(); 5400 5401 unsigned PFIndexes[4]; 5402 bool isFourElementShuffle = true; 5403 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 5404 unsigned EltNo = 8; // Start out undef. 5405 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 5406 if (PermMask[i*4+j] < 0) 5407 continue; // Undef, ignore it. 5408 5409 unsigned ByteSource = PermMask[i*4+j]; 5410 if ((ByteSource & 3) != j) { 5411 isFourElementShuffle = false; 5412 break; 5413 } 5414 5415 if (EltNo == 8) { 5416 EltNo = ByteSource/4; 5417 } else if (EltNo != ByteSource/4) { 5418 isFourElementShuffle = false; 5419 break; 5420 } 5421 } 5422 PFIndexes[i] = EltNo; 5423 } 5424 5425 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 5426 // perfect shuffle vector to determine if it is cost effective to do this as 5427 // discrete instructions, or whether we should use a vperm. 5428 if (isFourElementShuffle) { 5429 // Compute the index in the perfect shuffle table. 5430 unsigned PFTableIndex = 5431 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5432 5433 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5434 unsigned Cost = (PFEntry >> 30); 5435 5436 // Determining when to avoid vperm is tricky. Many things affect the cost 5437 // of vperm, particularly how many times the perm mask needs to be computed. 5438 // For example, if the perm mask can be hoisted out of a loop or is already 5439 // used (perhaps because there are multiple permutes with the same shuffle 5440 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of 5441 // the loop requires an extra register. 5442 // 5443 // As a compromise, we only emit discrete instructions if the shuffle can be 5444 // generated in 3 or fewer operations. When we have loop information 5445 // available, if this block is within a loop, we should avoid using vperm 5446 // for 3-operation perms and use a constant pool load instead. 
5447 if (Cost < 3) 5448 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 5449 } 5450 5451 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant 5452 // vector that will get spilled to the constant pool. 5453 if (V2.getOpcode() == ISD::UNDEF) V2 = V1; 5454 5455 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except 5456 // that it is in input element units, not in bytes. Convert now. 5457 EVT EltVT = V1.getValueType().getVectorElementType(); 5458 unsigned BytesPerElement = EltVT.getSizeInBits()/8; 5459 5460 SmallVector<SDValue, 16> ResultMask; 5461 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { 5462 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i]; 5463 5464 for (unsigned j = 0; j != BytesPerElement; ++j) 5465 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j, 5466 MVT::i32)); 5467 } 5468 5469 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, 5470 &ResultMask[0], ResultMask.size()); 5471 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask); 5472} 5473 5474/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an 5475/// altivec comparison. If it is, return true and fill in Opc/isDot with 5476/// information about the intrinsic. 5477static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, 5478 bool &isDot) { 5479 unsigned IntrinsicID = 5480 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue(); 5481 CompareOpc = -1; 5482 isDot = false; 5483 switch (IntrinsicID) { 5484 default: return false; 5485 // Comparison predicates. 5486 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break; 5487 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break; 5488 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break; 5489 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break; 5490 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break; 5491 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break; 5492 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break; 5493 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break; 5494 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break; 5495 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break; 5496 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break; 5497 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break; 5498 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break; 5499 5500 // Normal Comparisons. 
5501 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break; 5502 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break; 5503 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break; 5504 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break; 5505 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break; 5506 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break; 5507 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break; 5508 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break; 5509 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break; 5510 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break; 5511 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break; 5512 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break; 5513 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break; 5514 } 5515 return true; 5516} 5517 5518/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 5519/// lower, do it, otherwise return null. 5520SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 5521 SelectionDAG &DAG) const { 5522 // If this is a lowered altivec predicate compare, CompareOpc is set to the 5523 // opcode number of the comparison. 5524 SDLoc dl(Op); 5525 int CompareOpc; 5526 bool isDot; 5527 if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) 5528 return SDValue(); // Don't custom lower most intrinsics. 5529 5530 // If this is a non-dot comparison, make the VCMP node and we are done. 5531 if (!isDot) { 5532 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 5533 Op.getOperand(1), Op.getOperand(2), 5534 DAG.getConstant(CompareOpc, MVT::i32)); 5535 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 5536 } 5537 5538 // Create the PPCISD altivec 'dot' comparison node. 5539 SDValue Ops[] = { 5540 Op.getOperand(2), // LHS 5541 Op.getOperand(3), // RHS 5542 DAG.getConstant(CompareOpc, MVT::i32) 5543 }; 5544 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 5545 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 5546 5547 // Now that we have the comparison, emit a copy from the CR to a GPR. 5548 // This is flagged to the above dot comparison. 5549 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 5550 DAG.getRegister(PPC::CR6, MVT::i32), 5551 CompNode.getValue(1)); 5552 5553 // Unpack the result based on how the target uses it. 5554 unsigned BitNo; // Bit # of CR6. 5555 bool InvertBit; // Invert result? 5556 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 5557 default: // Can't happen, don't crash on invalid number though. 5558 case 0: // Return the value of the EQ bit of CR6. 5559 BitNo = 0; InvertBit = false; 5560 break; 5561 case 1: // Return the inverted value of the EQ bit of CR6. 5562 BitNo = 0; InvertBit = true; 5563 break; 5564 case 2: // Return the value of the LT bit of CR6. 5565 BitNo = 2; InvertBit = false; 5566 break; 5567 case 3: // Return the inverted value of the LT bit of CR6. 5568 BitNo = 2; InvertBit = true; 5569 break; 5570 } 5571 5572 // Shift the bit into the low position. 5573 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 5574 DAG.getConstant(8-(3-BitNo), MVT::i32)); 5575 // Isolate the bit. 5576 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 5577 DAG.getConstant(1, MVT::i32)); 5578 5579 // If we are supposed to, toggle the bit. 
5580 if (InvertBit) 5581 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 5582 DAG.getConstant(1, MVT::i32)); 5583 return Flags; 5584} 5585 5586SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 5587 SelectionDAG &DAG) const { 5588 SDLoc dl(Op); 5589 // Create a stack slot that is 16-byte aligned. 5590 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 5591 int FrameIdx = FrameInfo->CreateStackObject(16, 16, false); 5592 EVT PtrVT = getPointerTy(); 5593 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 5594 5595 // Store the input value into Value#0 of the stack slot. 5596 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 5597 Op.getOperand(0), FIdx, MachinePointerInfo(), 5598 false, false, 0); 5599 // Load it out. 5600 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(), 5601 false, false, false, 0); 5602} 5603 5604SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 5605 SDLoc dl(Op); 5606 if (Op.getValueType() == MVT::v4i32) { 5607 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5608 5609 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 5610 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 5611 5612 SDValue RHSSwap = // = vrlw RHS, 16 5613 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 5614 5615 // Shrinkify inputs to v8i16. 5616 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 5617 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 5618 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 5619 5620 // Low parts multiplied together, generating 32-bit results (we ignore the 5621 // top parts). 5622 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 5623 LHS, RHS, DAG, dl, MVT::v4i32); 5624 5625 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 5626 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 5627 // Shift the high parts up 16 bits. 5628 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 5629 Neg16, DAG, dl); 5630 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 5631 } else if (Op.getValueType() == MVT::v8i16) { 5632 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5633 5634 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 5635 5636 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 5637 LHS, RHS, Zero, DAG, dl); 5638 } else if (Op.getValueType() == MVT::v16i8) { 5639 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 5640 5641 // Multiply the even 8-bit parts, producing 16-bit sums. 5642 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 5643 LHS, RHS, DAG, dl, MVT::v8i16); 5644 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 5645 5646 // Multiply the odd 8-bit parts, producing 16-bit sums. 5647 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 5648 LHS, RHS, DAG, dl, MVT::v8i16); 5649 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 5650 5651 // Merge the results together. 5652 int Ops[16]; 5653 for (unsigned i = 0; i != 8; ++i) { 5654 Ops[i*2 ] = 2*i+1; 5655 Ops[i*2+1] = 2*i+1+16; 5656 } 5657 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops); 5658 } else { 5659 llvm_unreachable("Unknown mul to lower!"); 5660 } 5661} 5662 5663/// LowerOperation - Provide custom lowering hooks for some operations. 
5664/// 5665SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 5666 switch (Op.getOpcode()) { 5667 default: llvm_unreachable("Wasn't expecting to be able to lower this!"); 5668 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5669 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 5670 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 5671 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5672 case ISD::JumpTable: return LowerJumpTable(Op, DAG); 5673 case ISD::SETCC: return LowerSETCC(Op, DAG); 5674 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); 5675 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); 5676 case ISD::VASTART: 5677 return LowerVASTART(Op, DAG, PPCSubTarget); 5678 5679 case ISD::VAARG: 5680 return LowerVAARG(Op, DAG, PPCSubTarget); 5681 5682 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); 5683 case ISD::DYNAMIC_STACKALLOC: 5684 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget); 5685 5686 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 5687 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 5688 5689 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5690 case ISD::FP_TO_UINT: 5691 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, 5692 SDLoc(Op)); 5693 case ISD::UINT_TO_FP: 5694 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 5695 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5696 5697 // Lower 64-bit shifts. 5698 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 5699 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 5700 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 5701 5702 // Vector-related lowering. 5703 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 5704 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5705 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 5706 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 5707 case ISD::MUL: return LowerMUL(Op, DAG); 5708 5709 // For counter-based loop handling. 5710 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 5711 5712 // Frame & Return address. 
5713 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5714 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5715 } 5716} 5717 5718void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 5719 SmallVectorImpl<SDValue>&Results, 5720 SelectionDAG &DAG) const { 5721 const TargetMachine &TM = getTargetMachine(); 5722 SDLoc dl(N); 5723 switch (N->getOpcode()) { 5724 default: 5725 llvm_unreachable("Do not know how to custom type legalize this operation!"); 5726 case ISD::INTRINSIC_W_CHAIN: { 5727 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 5728 Intrinsic::ppc_is_decremented_ctr_nonzero) 5729 break; 5730 5731 assert(N->getValueType(0) == MVT::i1 && 5732 "Unexpected result type for CTR decrement intrinsic"); 5733 EVT SVT = getSetCCResultType(*DAG.getContext(), N->getValueType(0)); 5734 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 5735 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 5736 N->getOperand(1)); 5737 5738 Results.push_back(NewInt); 5739 Results.push_back(NewInt.getValue(1)); 5740 break; 5741 } 5742 case ISD::VAARG: { 5743 if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI() 5744 || TM.getSubtarget<PPCSubtarget>().isPPC64()) 5745 return; 5746 5747 EVT VT = N->getValueType(0); 5748 5749 if (VT == MVT::i64) { 5750 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget); 5751 5752 Results.push_back(NewNode); 5753 Results.push_back(NewNode.getValue(1)); 5754 } 5755 return; 5756 } 5757 case ISD::FP_ROUND_INREG: { 5758 assert(N->getValueType(0) == MVT::ppcf128); 5759 assert(N->getOperand(0).getValueType() == MVT::ppcf128); 5760 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 5761 MVT::f64, N->getOperand(0), 5762 DAG.getIntPtrConstant(0)); 5763 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 5764 MVT::f64, N->getOperand(0), 5765 DAG.getIntPtrConstant(1)); 5766 5767 // Add the two halves of the long double in round-to-zero mode. 5768 SDValue FPreg = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 5769 5770 // We know the low half is about to be thrown away, so just use something 5771 // convenient. 5772 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 5773 FPreg, FPreg)); 5774 return; 5775 } 5776 case ISD::FP_TO_SINT: 5777 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl)); 5778 return; 5779 } 5780} 5781 5782 5783//===----------------------------------------------------------------------===// 5784// Other Lowering Code 5785//===----------------------------------------------------------------------===// 5786 5787MachineBasicBlock * 5788PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5789 bool is64bit, unsigned BinOpcode) const { 5790 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 
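// The loop below relies on the lwarx/stwcx. reservation protocol: stwcx.
// sets CR0[EQ] only if the reservation established by the lwarx is still
// held, so the bne- retries the read-modify-write whenever another
// processor has written the line in the meantime.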
5791 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5792 5793 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5794 MachineFunction *F = BB->getParent(); 5795 MachineFunction::iterator It = BB; 5796 ++It; 5797 5798 unsigned dest = MI->getOperand(0).getReg(); 5799 unsigned ptrA = MI->getOperand(1).getReg(); 5800 unsigned ptrB = MI->getOperand(2).getReg(); 5801 unsigned incr = MI->getOperand(3).getReg(); 5802 DebugLoc dl = MI->getDebugLoc(); 5803 5804 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 5805 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5806 F->insert(It, loopMBB); 5807 F->insert(It, exitMBB); 5808 exitMBB->splice(exitMBB->begin(), BB, 5809 llvm::next(MachineBasicBlock::iterator(MI)), 5810 BB->end()); 5811 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5812 5813 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5814 unsigned TmpReg = (!BinOpcode) ? incr : 5815 RegInfo.createVirtualRegister( 5816 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 5817 (const TargetRegisterClass *) &PPC::GPRCRegClass); 5818 5819 // thisMBB: 5820 // ... 5821 // fallthrough --> loopMBB 5822 BB->addSuccessor(loopMBB); 5823 5824 // loopMBB: 5825 // l[wd]arx dest, ptr 5826 // add r0, dest, incr 5827 // st[wd]cx. r0, ptr 5828 // bne- loopMBB 5829 // fallthrough --> exitMBB 5830 BB = loopMBB; 5831 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 5832 .addReg(ptrA).addReg(ptrB); 5833 if (BinOpcode) 5834 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest); 5835 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 5836 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 5837 BuildMI(BB, dl, TII->get(PPC::BCC)) 5838 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 5839 BB->addSuccessor(loopMBB); 5840 BB->addSuccessor(exitMBB); 5841 5842 // exitMBB: 5843 // ... 5844 BB = exitMBB; 5845 return BB; 5846} 5847 5848MachineBasicBlock * 5849PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 5850 MachineBasicBlock *BB, 5851 bool is8bit, // operation 5852 unsigned BinOpcode) const { 5853 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5854 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5855 // In 64 bit mode we have to use 64 bits for addresses, even though the 5856 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 5857 // registers without caring whether they're 32 or 64, but here we're 5858 // doing actual arithmetic on the addresses. 5859 bool is64bit = PPCSubTarget.isPPC64(); 5860 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 5861 5862 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5863 MachineFunction *F = BB->getParent(); 5864 MachineFunction::iterator It = BB; 5865 ++It; 5866 5867 unsigned dest = MI->getOperand(0).getReg(); 5868 unsigned ptrA = MI->getOperand(1).getReg(); 5869 unsigned ptrB = MI->getOperand(2).getReg(); 5870 unsigned incr = MI->getOperand(3).getReg(); 5871 DebugLoc dl = MI->getDebugLoc(); 5872 5873 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 5874 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 5875 F->insert(It, loopMBB); 5876 F->insert(It, exitMBB); 5877 exitMBB->splice(exitMBB->begin(), BB, 5878 llvm::next(MachineBasicBlock::iterator(MI)), 5879 BB->end()); 5880 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5881 5882 MachineRegisterInfo &RegInfo = F->getRegInfo(); 5883 const TargetRegisterClass *RC = 5884 is64bit ? 
(const TargetRegisterClass *) &PPC::G8RCRegClass : 5885 (const TargetRegisterClass *) &PPC::GPRCRegClass; 5886 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 5887 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 5888 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 5889 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC); 5890 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 5891 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 5892 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 5893 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 5894 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC); 5895 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 5896 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 5897 unsigned Ptr1Reg; 5898 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC); 5899 5900 // thisMBB: 5901 // ... 5902 // fallthrough --> loopMBB 5903 BB->addSuccessor(loopMBB); 5904 5905 // The 4-byte load must be aligned, while a char or short may be 5906 // anywhere in the word. Hence all this nasty bookkeeping code. 5907 // add ptr1, ptrA, ptrB [copy if ptrA==0] 5908 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 5909 // xori shift, shift1, 24 [16] 5910 // rlwinm ptr, ptr1, 0, 0, 29 5911 // slw incr2, incr, shift 5912 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 5913 // slw mask, mask2, shift 5914 // loopMBB: 5915 // lwarx tmpDest, ptr 5916 // add tmp, tmpDest, incr2 5917 // andc tmp2, tmpDest, mask 5918 // and tmp3, tmp, mask 5919 // or tmp4, tmp3, tmp2 5920 // stwcx. tmp4, ptr 5921 // bne- loopMBB 5922 // fallthrough --> exitMBB 5923 // srw dest, tmpDest, shift 5924 if (ptrA != ZeroReg) { 5925 Ptr1Reg = RegInfo.createVirtualRegister(RC); 5926 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) 5927 .addReg(ptrA).addReg(ptrB); 5928 } else { 5929 Ptr1Reg = ptrB; 5930 } 5931 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 5932 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 5933 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 5934 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 5935 if (is64bit) 5936 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 5937 .addReg(Ptr1Reg).addImm(0).addImm(61); 5938 else 5939 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 5940 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 5941 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg) 5942 .addReg(incr).addReg(ShiftReg); 5943 if (is8bit) 5944 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 5945 else { 5946 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 5947 BuildMI(BB, dl, TII->get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).addImm(65535); 5948 } 5949 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 5950 .addReg(Mask2Reg).addReg(ShiftReg); 5951 5952 BB = loopMBB; 5953 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 5954 .addReg(ZeroReg).addReg(PtrReg); 5955 if (BinOpcode) 5956 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg) 5957 .addReg(Incr2Reg).addReg(TmpDestReg); 5958 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg) 5959 .addReg(TmpDestReg).addReg(MaskReg); 5960 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg) 5961 .addReg(TmpReg).addReg(MaskReg); 5962 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::OR8 : PPC::OR), Tmp4Reg) 5963 .addReg(Tmp3Reg).addReg(Tmp2Reg); 5964 BuildMI(BB, dl, TII->get(PPC::STWCX)) 5965 .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg); 5966 BuildMI(BB, dl, TII->get(PPC::BCC)) 5967 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 5968 BB->addSuccessor(loopMBB); 5969 BB->addSuccessor(exitMBB); 5970 5971 // exitMBB: 5972 // ... 5973 BB = exitMBB; 5974 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg) 5975 .addReg(ShiftReg); 5976 return BB; 5977} 5978 5979llvm::MachineBasicBlock* 5980PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, 5981 MachineBasicBlock *MBB) const { 5982 DebugLoc DL = MI->getDebugLoc(); 5983 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5984 5985 MachineFunction *MF = MBB->getParent(); 5986 MachineRegisterInfo &MRI = MF->getRegInfo(); 5987 5988 const BasicBlock *BB = MBB->getBasicBlock(); 5989 MachineFunction::iterator I = MBB; 5990 ++I; 5991 5992 // Memory Reference 5993 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 5994 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 5995 5996 unsigned DstReg = MI->getOperand(0).getReg(); 5997 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 5998 assert(RC->hasType(MVT::i32) && "Invalid destination!"); 5999 unsigned mainDstReg = MRI.createVirtualRegister(RC); 6000 unsigned restoreDstReg = MRI.createVirtualRegister(RC); 6001 6002 MVT PVT = getPointerTy(); 6003 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6004 "Invalid Pointer Size!"); 6005 // For v = setjmp(buf), we generate 6006 // 6007 // thisMBB: 6008 // SjLjSetup mainMBB 6009 // bl mainMBB 6010 // v_restore = 1 6011 // b sinkMBB 6012 // 6013 // mainMBB: 6014 // buf[LabelOffset] = LR 6015 // v_main = 0 6016 // 6017 // sinkMBB: 6018 // v = phi(main, restore) 6019 // 6020 6021 MachineBasicBlock *thisMBB = MBB; 6022 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 6023 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 6024 MF->insert(I, mainMBB); 6025 MF->insert(I, sinkMBB); 6026 6027 MachineInstrBuilder MIB; 6028 6029 // Transfer the remainder of BB and its successor edges to sinkMBB. 6030 sinkMBB->splice(sinkMBB->begin(), MBB, 6031 llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); 6032 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 6033 6034 // Note that the structure of the jmp_buf used here is not compatible 6035 // with that used by libc, and is not designed to be. Specifically, it 6036 // stores only those 'reserved' registers that LLVM does not otherwise 6037 // understand how to spill. Also, by convention, by the time this 6038 // intrinsic is called, Clang has already stored the frame address in the 6039 // first slot of the buffer and stack address in the third. Following the 6040 // X86 target code, we'll store the jump address in the second slot. We also 6041 // need to save the TOC pointer (R2) to handle jumps between shared 6042 // libraries, and that will be stored in the fourth slot. The thread 6043 // identifier (R13) is not affected. 6044 6045 // thisMBB: 6046 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6047 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6048 6049 // Prepare IP either in reg. 
6050 const TargetRegisterClass *PtrRC = getRegClassFor(PVT); 6051 unsigned LabelReg = MRI.createVirtualRegister(PtrRC); 6052 unsigned BufReg = MI->getOperand(1).getReg(); 6053 6054 if (PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI()) { 6055 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) 6056 .addReg(PPC::X2) 6057 .addImm(TOCOffset) 6058 .addReg(BufReg); 6059 6060 MIB.setMemRefs(MMOBegin, MMOEnd); 6061 } 6062 6063 // Setup 6064 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); 6065 const PPCRegisterInfo *TRI = 6066 static_cast<const PPCRegisterInfo*>(getTargetMachine().getRegisterInfo()); 6067 MIB.addRegMask(TRI->getNoPreservedMask()); 6068 6069 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1); 6070 6071 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup)) 6072 .addMBB(mainMBB); 6073 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB); 6074 6075 thisMBB->addSuccessor(mainMBB, /* weight */ 0); 6076 thisMBB->addSuccessor(sinkMBB, /* weight */ 1); 6077 6078 // mainMBB: 6079 // mainDstReg = 0 6080 MIB = BuildMI(mainMBB, DL, 6081 TII->get(PPCSubTarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg); 6082 6083 // Store IP 6084 if (PPCSubTarget.isPPC64()) { 6085 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 6086 .addReg(LabelReg) 6087 .addImm(LabelOffset) 6088 .addReg(BufReg); 6089 } else { 6090 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 6091 .addReg(LabelReg) 6092 .addImm(LabelOffset) 6093 .addReg(BufReg); 6094 } 6095 6096 MIB.setMemRefs(MMOBegin, MMOEnd); 6097 6098 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 6099 mainMBB->addSuccessor(sinkMBB); 6100 6101 // sinkMBB: 6102 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 6103 TII->get(PPC::PHI), DstReg) 6104 .addReg(mainDstReg).addMBB(mainMBB) 6105 .addReg(restoreDstReg).addMBB(thisMBB); 6106 6107 MI->eraseFromParent(); 6108 return sinkMBB; 6109} 6110 6111MachineBasicBlock * 6112PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, 6113 MachineBasicBlock *MBB) const { 6114 DebugLoc DL = MI->getDebugLoc(); 6115 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6116 6117 MachineFunction *MF = MBB->getParent(); 6118 MachineRegisterInfo &MRI = MF->getRegInfo(); 6119 6120 // Memory Reference 6121 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); 6122 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); 6123 6124 MVT PVT = getPointerTy(); 6125 assert((PVT == MVT::i64 || PVT == MVT::i32) && 6126 "Invalid Pointer Size!"); 6127 6128 const TargetRegisterClass *RC = 6129 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 6130 unsigned Tmp = MRI.createVirtualRegister(RC); 6131 // Since FP is only updated here but NOT referenced, it's treated as GPR. 6132 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 6133 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 6134 6135 MachineInstrBuilder MIB; 6136 6137 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 6138 const int64_t SPOffset = 2 * PVT.getStoreSize(); 6139 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 6140 6141 unsigned BufReg = MI->getOperand(0).getReg(); 6142 6143 // Reload FP (the jumped-to function may not have had a 6144 // frame pointer, and if so, then its r31 will be restored 6145 // as necessary). 
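// For reference, the slots reloaded below follow the layout established in
// emitEHSjLjSetJmp (PVT.getStoreSize() is 8 on ppc64 and 4 on ppc32):
//   offset 0            frame pointer (stored by the front end)
//   offset LabelOffset  jump target IP (stored on the setjmp side)
//   offset SPOffset     stack pointer (stored by the front end)
//   offset TOCOffset    TOC pointer (64-bit SVR4 only)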
6146 if (PVT == MVT::i64) { 6147 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP) 6148 .addImm(0) 6149 .addReg(BufReg); 6150 } else { 6151 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP) 6152 .addImm(0) 6153 .addReg(BufReg); 6154 } 6155 MIB.setMemRefs(MMOBegin, MMOEnd); 6156 6157 // Reload IP 6158 if (PVT == MVT::i64) { 6159 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp) 6160 .addImm(LabelOffset) 6161 .addReg(BufReg); 6162 } else { 6163 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp) 6164 .addImm(LabelOffset) 6165 .addReg(BufReg); 6166 } 6167 MIB.setMemRefs(MMOBegin, MMOEnd); 6168 6169 // Reload SP 6170 if (PVT == MVT::i64) { 6171 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP) 6172 .addImm(SPOffset) 6173 .addReg(BufReg); 6174 } else { 6175 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP) 6176 .addImm(SPOffset) 6177 .addReg(BufReg); 6178 } 6179 MIB.setMemRefs(MMOBegin, MMOEnd); 6180 6181 // FIXME: When we also support base pointers, that register must also be 6182 // restored here. 6183 6184 // Reload TOC 6185 if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) { 6186 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) 6187 .addImm(TOCOffset) 6188 .addReg(BufReg); 6189 6190 MIB.setMemRefs(MMOBegin, MMOEnd); 6191 } 6192 6193 // Jump 6194 BuildMI(*MBB, MI, DL, 6195 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp); 6196 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR)); 6197 6198 MI->eraseFromParent(); 6199 return MBB; 6200} 6201 6202MachineBasicBlock * 6203PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 6204 MachineBasicBlock *BB) const { 6205 if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 || 6206 MI->getOpcode() == PPC::EH_SjLj_SetJmp64) { 6207 return emitEHSjLjSetJmp(MI, BB); 6208 } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 || 6209 MI->getOpcode() == PPC::EH_SjLj_LongJmp64) { 6210 return emitEHSjLjLongJmp(MI, BB); 6211 } 6212 6213 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6214 6215 // To "insert" these instructions we actually have to insert their 6216 // control-flow patterns. 6217 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6218 MachineFunction::iterator It = BB; 6219 ++It; 6220 6221 MachineFunction *F = BB->getParent(); 6222 6223 if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 || 6224 MI->getOpcode() == PPC::SELECT_CC_I8)) { 6225 SmallVector<MachineOperand, 2> Cond; 6226 Cond.push_back(MI->getOperand(4)); 6227 Cond.push_back(MI->getOperand(1)); 6228 6229 DebugLoc dl = MI->getDebugLoc(); 6230 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6231 TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(), 6232 Cond, MI->getOperand(2).getReg(), 6233 MI->getOperand(3).getReg()); 6234 } else if (MI->getOpcode() == PPC::SELECT_CC_I4 || 6235 MI->getOpcode() == PPC::SELECT_CC_I8 || 6236 MI->getOpcode() == PPC::SELECT_CC_F4 || 6237 MI->getOpcode() == PPC::SELECT_CC_F8 || 6238 MI->getOpcode() == PPC::SELECT_CC_VRRC) { 6239 6240 6241 // The incoming instruction knows the destination vreg to set, the 6242 // condition code register to branch on, the true/false values to 6243 // select between, and a branch opcode to use. 6244 6245 // thisMBB: 6246 // ... 6247 // TrueVal = ... 
6248 // cmpTY ccX, r1, r2 6249 // bCC copy1MBB 6250 // fallthrough --> copy0MBB 6251 MachineBasicBlock *thisMBB = BB; 6252 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 6253 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 6254 unsigned SelectPred = MI->getOperand(4).getImm(); 6255 DebugLoc dl = MI->getDebugLoc(); 6256 F->insert(It, copy0MBB); 6257 F->insert(It, sinkMBB); 6258 6259 // Transfer the remainder of BB and its successor edges to sinkMBB. 6260 sinkMBB->splice(sinkMBB->begin(), BB, 6261 llvm::next(MachineBasicBlock::iterator(MI)), 6262 BB->end()); 6263 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 6264 6265 // Next, add the true and fallthrough blocks as its successors. 6266 BB->addSuccessor(copy0MBB); 6267 BB->addSuccessor(sinkMBB); 6268 6269 BuildMI(BB, dl, TII->get(PPC::BCC)) 6270 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB); 6271 6272 // copy0MBB: 6273 // %FalseValue = ... 6274 // # fallthrough to sinkMBB 6275 BB = copy0MBB; 6276 6277 // Update machine-CFG edges 6278 BB->addSuccessor(sinkMBB); 6279 6280 // sinkMBB: 6281 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 6282 // ... 6283 BB = sinkMBB; 6284 BuildMI(*BB, BB->begin(), dl, 6285 TII->get(PPC::PHI), MI->getOperand(0).getReg()) 6286 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB) 6287 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 6288 } 6289 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 6290 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 6291 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 6292 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 6293 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 6294 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); 6295 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 6296 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8); 6297 6298 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 6299 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 6300 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 6301 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 6302 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 6303 BB = EmitAtomicBinary(MI, BB, false, PPC::AND); 6304 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 6305 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8); 6306 6307 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 6308 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 6309 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 6310 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 6311 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 6312 BB = EmitAtomicBinary(MI, BB, false, PPC::OR); 6313 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 6314 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8); 6315 6316 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 6317 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 6318 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 6319 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 6320 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 6321 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR); 6322 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 6323 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8); 6324 6325 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 6326 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC); 6327 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 6328 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC); 6329 else if 
(MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 6330 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC); 6331 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 6332 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8); 6333 6334 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 6335 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 6336 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 6337 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 6338 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 6339 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF); 6340 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 6341 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8); 6342 6343 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8) 6344 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 6345 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16) 6346 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 6347 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32) 6348 BB = EmitAtomicBinary(MI, BB, false, 0); 6349 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64) 6350 BB = EmitAtomicBinary(MI, BB, true, 0); 6351 6352 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 6353 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) { 6354 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 6355 6356 unsigned dest = MI->getOperand(0).getReg(); 6357 unsigned ptrA = MI->getOperand(1).getReg(); 6358 unsigned ptrB = MI->getOperand(2).getReg(); 6359 unsigned oldval = MI->getOperand(3).getReg(); 6360 unsigned newval = MI->getOperand(4).getReg(); 6361 DebugLoc dl = MI->getDebugLoc(); 6362 6363 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 6364 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 6365 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6366 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6367 F->insert(It, loop1MBB); 6368 F->insert(It, loop2MBB); 6369 F->insert(It, midMBB); 6370 F->insert(It, exitMBB); 6371 exitMBB->splice(exitMBB->begin(), BB, 6372 llvm::next(MachineBasicBlock::iterator(MI)), 6373 BB->end()); 6374 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6375 6376 // thisMBB: 6377 // ... 6378 // fallthrough --> loopMBB 6379 BB->addSuccessor(loop1MBB); 6380 6381 // loop1MBB: 6382 // l[wd]arx dest, ptr 6383 // cmp[wd] dest, oldval 6384 // bne- midMBB 6385 // loop2MBB: 6386 // st[wd]cx. newval, ptr 6387 // bne- loopMBB 6388 // b exitBB 6389 // midMBB: 6390 // st[wd]cx. dest, ptr 6391 // exitBB: 6392 BB = loop1MBB; 6393 BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest) 6394 .addReg(ptrA).addReg(ptrB); 6395 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0) 6396 .addReg(oldval).addReg(dest); 6397 BuildMI(BB, dl, TII->get(PPC::BCC)) 6398 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 6399 BB->addSuccessor(loop2MBB); 6400 BB->addSuccessor(midMBB); 6401 6402 BB = loop2MBB; 6403 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 6404 .addReg(newval).addReg(ptrA).addReg(ptrB); 6405 BuildMI(BB, dl, TII->get(PPC::BCC)) 6406 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 6407 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6408 BB->addSuccessor(loop1MBB); 6409 BB->addSuccessor(exitMBB); 6410 6411 BB = midMBB; 6412 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX)) 6413 .addReg(dest).addReg(ptrA).addReg(ptrB); 6414 BB->addSuccessor(exitMBB); 6415 6416 // exitMBB: 6417 // ... 
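// Note on midMBB above: the extra st[wd]cx. of the value just loaded exists
// only to clear the outstanding l[wd]arx reservation on the failure path.
// Whether it succeeds (rewriting the value already in memory) or fails, no
// reservation is left dangling when control reaches exitMBB.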
6418 BB = exitMBB; 6419 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 || 6420 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) { 6421 // We must use 64-bit registers for addresses when targeting 64-bit, 6422 // since we're actually doing arithmetic on them. Other registers 6423 // can be 32-bit. 6424 bool is64bit = PPCSubTarget.isPPC64(); 6425 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8; 6426 6427 unsigned dest = MI->getOperand(0).getReg(); 6428 unsigned ptrA = MI->getOperand(1).getReg(); 6429 unsigned ptrB = MI->getOperand(2).getReg(); 6430 unsigned oldval = MI->getOperand(3).getReg(); 6431 unsigned newval = MI->getOperand(4).getReg(); 6432 DebugLoc dl = MI->getDebugLoc(); 6433 6434 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB); 6435 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB); 6436 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6437 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6438 F->insert(It, loop1MBB); 6439 F->insert(It, loop2MBB); 6440 F->insert(It, midMBB); 6441 F->insert(It, exitMBB); 6442 exitMBB->splice(exitMBB->begin(), BB, 6443 llvm::next(MachineBasicBlock::iterator(MI)), 6444 BB->end()); 6445 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6446 6447 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6448 const TargetRegisterClass *RC = 6449 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass : 6450 (const TargetRegisterClass *) &PPC::GPRCRegClass; 6451 unsigned PtrReg = RegInfo.createVirtualRegister(RC); 6452 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC); 6453 unsigned ShiftReg = RegInfo.createVirtualRegister(RC); 6454 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC); 6455 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC); 6456 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC); 6457 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC); 6458 unsigned MaskReg = RegInfo.createVirtualRegister(RC); 6459 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC); 6460 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC); 6461 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC); 6462 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC); 6463 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC); 6464 unsigned Ptr1Reg; 6465 unsigned TmpReg = RegInfo.createVirtualRegister(RC); 6466 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 6467 // thisMBB: 6468 // ... 6469 // fallthrough --> loopMBB 6470 BB->addSuccessor(loop1MBB); 6471 6472 // The 4-byte load must be aligned, while a char or short may be 6473 // anywhere in the word. Hence all this nasty bookkeeping code. 6474 // add ptr1, ptrA, ptrB [copy if ptrA==0] 6475 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27] 6476 // xori shift, shift1, 24 [16] 6477 // rlwinm ptr, ptr1, 0, 0, 29 6478 // slw newval2, newval, shift 6479 // slw oldval2, oldval,shift 6480 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535] 6481 // slw mask, mask2, shift 6482 // and newval3, newval2, mask 6483 // and oldval3, oldval2, mask 6484 // loop1MBB: 6485 // lwarx tmpDest, ptr 6486 // and tmp, tmpDest, mask 6487 // cmpw tmp, oldval3 6488 // bne- midMBB 6489 // loop2MBB: 6490 // andc tmp2, tmpDest, mask 6491 // or tmp4, tmp2, newval3 6492 // stwcx. tmp4, ptr 6493 // bne- loop1MBB 6494 // b exitBB 6495 // midMBB: 6496 // stwcx. tmpDest, ptr 6497 // exitBB: 6498 // srw dest, tmpDest, shift 6499 if (ptrA != ZeroReg) { 6500 Ptr1Reg = RegInfo.createVirtualRegister(RC); 6501 BuildMI(BB, dl, TII->get(is64bit ? 
PPC::ADD8 : PPC::ADD4), Ptr1Reg) 6502 .addReg(ptrA).addReg(ptrB); 6503 } else { 6504 Ptr1Reg = ptrB; 6505 } 6506 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg) 6507 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27); 6508 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg) 6509 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16); 6510 if (is64bit) 6511 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 6512 .addReg(Ptr1Reg).addImm(0).addImm(61); 6513 else 6514 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 6515 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29); 6516 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 6517 .addReg(newval).addReg(ShiftReg); 6518 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 6519 .addReg(oldval).addReg(ShiftReg); 6520 if (is8bit) 6521 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 6522 else { 6523 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 6524 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 6525 .addReg(Mask3Reg).addImm(65535); 6526 } 6527 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 6528 .addReg(Mask2Reg).addReg(ShiftReg); 6529 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 6530 .addReg(NewVal2Reg).addReg(MaskReg); 6531 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 6532 .addReg(OldVal2Reg).addReg(MaskReg); 6533 6534 BB = loop1MBB; 6535 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 6536 .addReg(ZeroReg).addReg(PtrReg); 6537 BuildMI(BB, dl, TII->get(PPC::AND),TmpReg) 6538 .addReg(TmpDestReg).addReg(MaskReg); 6539 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 6540 .addReg(TmpReg).addReg(OldVal3Reg); 6541 BuildMI(BB, dl, TII->get(PPC::BCC)) 6542 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB); 6543 BB->addSuccessor(loop2MBB); 6544 BB->addSuccessor(midMBB); 6545 6546 BB = loop2MBB; 6547 BuildMI(BB, dl, TII->get(PPC::ANDC),Tmp2Reg) 6548 .addReg(TmpDestReg).addReg(MaskReg); 6549 BuildMI(BB, dl, TII->get(PPC::OR),Tmp4Reg) 6550 .addReg(Tmp2Reg).addReg(NewVal3Reg); 6551 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg) 6552 .addReg(ZeroReg).addReg(PtrReg); 6553 BuildMI(BB, dl, TII->get(PPC::BCC)) 6554 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB); 6555 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6556 BB->addSuccessor(loop1MBB); 6557 BB->addSuccessor(exitMBB); 6558 6559 BB = midMBB; 6560 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg) 6561 .addReg(ZeroReg).addReg(PtrReg); 6562 BB->addSuccessor(exitMBB); 6563 6564 // exitMBB: 6565 // ... 6566 BB = exitMBB; 6567 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg) 6568 .addReg(ShiftReg); 6569 } else if (MI->getOpcode() == PPC::FADDrtz) { 6570 // This pseudo performs an FADD with rounding mode temporarily forced 6571 // to round-to-zero. We emit this via custom inserter since the FPSCR 6572 // is not modeled at the SelectionDAG level. 6573 unsigned Dest = MI->getOperand(0).getReg(); 6574 unsigned Src1 = MI->getOperand(1).getReg(); 6575 unsigned Src2 = MI->getOperand(2).getReg(); 6576 DebugLoc dl = MI->getDebugLoc(); 6577 6578 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6579 unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); 6580 6581 // Save FPSCR value. 6582 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg); 6583 6584 // Set rounding mode to round-to-zero. 6585 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31); 6586 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30); 6587 6588 // Perform addition. 
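// (Setting bit 31 and clearing bit 30 above leaves the FPSCR RN field,
// bits 30:31, equal to 0b01, i.e. round toward zero, for the FADD that
// follows; the MTFSF below then restores the saved rounding mode.)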
6589 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2); 6590 6591 // Restore FPSCR value. 6592 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF)).addImm(1).addReg(MFFSReg); 6593 } else if (MI->getOpcode() == PPC::FRINDrint || 6594 MI->getOpcode() == PPC::FRINSrint) { 6595 bool isf32 = MI->getOpcode() == PPC::FRINSrint; 6596 unsigned Dest = MI->getOperand(0).getReg(); 6597 unsigned Src = MI->getOperand(1).getReg(); 6598 DebugLoc dl = MI->getDebugLoc(); 6599 6600 MachineRegisterInfo &RegInfo = F->getRegInfo(); 6601 unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 6602 6603 // Perform the rounding. 6604 BuildMI(*BB, MI, dl, TII->get(isf32 ? PPC::FRINS : PPC::FRIND), Dest) 6605 .addReg(Src); 6606 6607 // Compare the results. 6608 BuildMI(*BB, MI, dl, TII->get(isf32 ? PPC::FCMPUS : PPC::FCMPUD), CRReg) 6609 .addReg(Dest).addReg(Src); 6610 6611 // If the results were not equal, then set the FPSCR XX bit. 6612 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB); 6613 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 6614 F->insert(It, midMBB); 6615 F->insert(It, exitMBB); 6616 exitMBB->splice(exitMBB->begin(), BB, 6617 llvm::next(MachineBasicBlock::iterator(MI)), 6618 BB->end()); 6619 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6620 6621 BuildMI(*BB, MI, dl, TII->get(PPC::BCC)) 6622 .addImm(PPC::PRED_EQ).addReg(CRReg).addMBB(exitMBB); 6623 6624 BB->addSuccessor(midMBB); 6625 BB->addSuccessor(exitMBB); 6626 6627 BB = midMBB; 6628 6629 // Set the FPSCR XX bit (FE_INEXACT). Note that we cannot just set 6630 // the FI bit here because that will not automatically set XX also, 6631 // and XX is what libm interprets as the FE_INEXACT flag. 6632 BuildMI(BB, dl, TII->get(PPC::MTFSB1)).addImm(/* 38 - 32 = */ 6); 6633 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 6634 6635 BB->addSuccessor(exitMBB); 6636 6637 BB = exitMBB; 6638 } else { 6639 llvm_unreachable("Unexpected instr type to insert"); 6640 } 6641 6642 MI->eraseFromParent(); // The pseudo instruction is gone now. 6643 return BB; 6644} 6645 6646//===----------------------------------------------------------------------===// 6647// Target Optimization Hooks 6648//===----------------------------------------------------------------------===// 6649 6650SDValue PPCTargetLowering::DAGCombineFastRecip(SDValue Op, 6651 DAGCombinerInfo &DCI) const { 6652 if (DCI.isAfterLegalizeVectorOps()) 6653 return SDValue(); 6654 6655 EVT VT = Op.getValueType(); 6656 6657 if ((VT == MVT::f32 && PPCSubTarget.hasFRES()) || 6658 (VT == MVT::f64 && PPCSubTarget.hasFRE()) || 6659 (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) { 6660 6661 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 6662 // For the reciprocal, we need to find the zero of the function: 6663 // F(X) = A X - 1 [which has a zero at X = 1/A] 6664 // => 6665 // X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form 6666 // does not require additional intermediate precision] 6667 6668 // Convergence is quadratic, so we essentially double the number of digits 6669 // correct after every iteration. The minimum architected relative 6670 // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has 6671 // 23 digits and double has 52 digits. 6672 int Iterations = PPCSubTarget.hasRecipPrec() ? 
1 : 3; 6673 if (VT.getScalarType() == MVT::f64) 6674 ++Iterations; 6675 6676 SelectionDAG &DAG = DCI.DAG; 6677 SDLoc dl(Op); 6678 6679 SDValue FPOne = 6680 DAG.getConstantFP(1.0, VT.getScalarType()); 6681 if (VT.isVector()) { 6682 assert(VT.getVectorNumElements() == 4 && 6683 "Unknown vector type"); 6684 FPOne = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 6685 FPOne, FPOne, FPOne, FPOne); 6686 } 6687 6688 SDValue Est = DAG.getNode(PPCISD::FRE, dl, VT, Op); 6689 DCI.AddToWorklist(Est.getNode()); 6690 6691 // Newton iterations: Est = Est + Est (1 - Arg * Est) 6692 for (int i = 0; i < Iterations; ++i) { 6693 SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Op, Est); 6694 DCI.AddToWorklist(NewEst.getNode()); 6695 6696 NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPOne, NewEst); 6697 DCI.AddToWorklist(NewEst.getNode()); 6698 6699 NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst); 6700 DCI.AddToWorklist(NewEst.getNode()); 6701 6702 Est = DAG.getNode(ISD::FADD, dl, VT, Est, NewEst); 6703 DCI.AddToWorklist(Est.getNode()); 6704 } 6705 6706 return Est; 6707 } 6708 6709 return SDValue(); 6710} 6711 6712SDValue PPCTargetLowering::DAGCombineFastRecipFSQRT(SDValue Op, 6713 DAGCombinerInfo &DCI) const { 6714 if (DCI.isAfterLegalizeVectorOps()) 6715 return SDValue(); 6716 6717 EVT VT = Op.getValueType(); 6718 6719 if ((VT == MVT::f32 && PPCSubTarget.hasFRSQRTES()) || 6720 (VT == MVT::f64 && PPCSubTarget.hasFRSQRTE()) || 6721 (VT == MVT::v4f32 && PPCSubTarget.hasAltivec())) { 6722 6723 // Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 6724 // For the reciprocal sqrt, we need to find the zero of the function: 6725 // F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)] 6726 // => 6727 // X_{i+1} = X_i (1.5 - A X_i^2 / 2) 6728 // As a result, we precompute A/2 prior to the iteration loop. 6729 6730 // Convergence is quadratic, so we essentially double the number of digits 6731 // correct after every iteration. The minimum architected relative 6732 // accuracy is 2^-5. When hasRecipPrec(), this is 2^-14. IEEE float has 6733 // 23 digits and double has 52 digits. 6734 int Iterations = PPCSubTarget.hasRecipPrec() ? 1 : 3; 6735 if (VT.getScalarType() == MVT::f64) 6736 ++Iterations; 6737 6738 SelectionDAG &DAG = DCI.DAG; 6739 SDLoc dl(Op); 6740 6741 SDValue FPThreeHalves = 6742 DAG.getConstantFP(1.5, VT.getScalarType()); 6743 if (VT.isVector()) { 6744 assert(VT.getVectorNumElements() == 4 && 6745 "Unknown vector type"); 6746 FPThreeHalves = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, 6747 FPThreeHalves, FPThreeHalves, 6748 FPThreeHalves, FPThreeHalves); 6749 } 6750 6751 SDValue Est = DAG.getNode(PPCISD::FRSQRTE, dl, VT, Op); 6752 DCI.AddToWorklist(Est.getNode()); 6753 6754 // We now need 0.5*Arg which we can write as (1.5*Arg - Arg) so that 6755 // this entire sequence requires only one FP constant. 
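// Illustrative check of the Iterations count chosen above: squaring the
// error each step takes the architected 2^-5 minimum to 2^-10, 2^-20, and
// 2^-40 after three steps (past f32's 23 fraction bits), and to 2^-80
// after the fourth step used for f64 (past its 52); with hasRecipPrec(),
// 2^-14 reaches 2^-28 in one step (f32) and 2^-56 in two (f64).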
6756 SDValue HalfArg = DAG.getNode(ISD::FMUL, dl, VT, FPThreeHalves, Op);
6757 DCI.AddToWorklist(HalfArg.getNode());
6758
6759 HalfArg = DAG.getNode(ISD::FSUB, dl, VT, HalfArg, Op);
6760 DCI.AddToWorklist(HalfArg.getNode());
6761
6762 // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
6763 for (int i = 0; i < Iterations; ++i) {
6764 SDValue NewEst = DAG.getNode(ISD::FMUL, dl, VT, Est, Est);
6765 DCI.AddToWorklist(NewEst.getNode());
6766
6767 NewEst = DAG.getNode(ISD::FMUL, dl, VT, HalfArg, NewEst);
6768 DCI.AddToWorklist(NewEst.getNode());
6769
6770 NewEst = DAG.getNode(ISD::FSUB, dl, VT, FPThreeHalves, NewEst);
6771 DCI.AddToWorklist(NewEst.getNode());
6772
6773 Est = DAG.getNode(ISD::FMUL, dl, VT, Est, NewEst);
6774 DCI.AddToWorklist(Est.getNode());
6775 }
6776
6777 return Est;
6778 }
6779
6780 return SDValue();
6781}
6782
6783// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
6784// not enforce equality of the chain operands.
6785static bool isConsecutiveLS(LSBaseSDNode *LS, LSBaseSDNode *Base,
6786 unsigned Bytes, int Dist,
6787 SelectionDAG &DAG) {
6788 EVT VT = LS->getMemoryVT();
6789 if (VT.getSizeInBits() / 8 != Bytes)
6790 return false;
6791
6792 SDValue Loc = LS->getBasePtr();
6793 SDValue BaseLoc = Base->getBasePtr();
6794 if (Loc.getOpcode() == ISD::FrameIndex) {
6795 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6796 return false;
6797 const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
6798 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6799 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6800 int FS = MFI->getObjectSize(FI);
6801 int BFS = MFI->getObjectSize(BFI);
6802 if (FS != BFS || FS != (int)Bytes) return false;
6803 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6804 }
6805
6806 // Handle X+C
6807 if (DAG.isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6808 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6809 return true;
6810
6811 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6812 const GlobalValue *GV1 = NULL;
6813 const GlobalValue *GV2 = NULL;
6814 int64_t Offset1 = 0;
6815 int64_t Offset2 = 0;
6816 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6817 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6818 if (isGA1 && isGA2 && GV1 == GV2)
6819 return Offset1 == (Offset2 + Dist*Bytes);
6820 return false;
6821}
6822
6823// Return true if there is a nearby consecutive load to the one provided
6824// (regardless of alignment). We search up and down the chain, looking through
6825// token factors and other loads (but nothing else). As a result, a true
6826// result indicates that it is safe to create a new consecutive load adjacent
6827// to the load provided.
6828static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
6829 SDValue Chain = LD->getChain();
6830 EVT VT = LD->getMemoryVT();
6831
6832 SmallSet<SDNode *, 16> LoadRoots;
6833 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
6834 SmallSet<SDNode *, 16> Visited;
6835
6836 // First, search up the chain, branching to follow all token-factor operands.
6837 // If we find a consecutive load, then we're done; otherwise, record all
6838 // nodes just above the top-level loads and token factors.
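// Worked example (hypothetical DAG): if LD->getChain() is a TokenFactor
// TF(L1.ch, L2.ch), this upward walk visits TF, then L1 and L2 (testing
// each against LD via isConsecutiveLS), and records whatever feeds the
// chains of L1 and L2 -- the EntryToken, say -- in LoadRoots for the
// downward phase that follows.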
6839 while (!Queue.empty()) {
6840 SDNode *ChainNext = Queue.pop_back_val();
6841 if (!Visited.insert(ChainNext))
6842 continue;
6843
6844 if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(ChainNext)) {
6845 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
6846 return true;
6847
6848 if (!Visited.count(ChainLD->getChain().getNode()))
6849 Queue.push_back(ChainLD->getChain().getNode());
6850 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
6851 for (SDNode::op_iterator O = ChainNext->op_begin(),
6852 OE = ChainNext->op_end(); O != OE; ++O)
6853 if (!Visited.count(O->getNode()))
6854 Queue.push_back(O->getNode());
6855 } else
6856 LoadRoots.insert(ChainNext);
6857 }
6858
6859 // Second, search down the chain, starting from the top-level nodes recorded
6860 // in the first phase. These top-level nodes are the nodes just above all
6861 // loads and token factors. Starting with their uses, recursively look through
6862 // all loads (just the chain uses) and token factors to find a consecutive
6863 // load.
6864 Visited.clear();
6865 Queue.clear();
6866
6867 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
6868 IE = LoadRoots.end(); I != IE; ++I) {
6869 Queue.push_back(*I);
6870
6871 while (!Queue.empty()) {
6872 SDNode *LoadRoot = Queue.pop_back_val();
6873 if (!Visited.insert(LoadRoot))
6874 continue;
6875
6876 if (LoadSDNode *ChainLD = dyn_cast<LoadSDNode>(LoadRoot))
6877 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
6878 return true;
6879
6880 for (SDNode::use_iterator UI = LoadRoot->use_begin(),
6881 UE = LoadRoot->use_end(); UI != UE; ++UI)
6882 if (((isa<LoadSDNode>(*UI) &&
6883 cast<LoadSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
6884 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
6885 Queue.push_back(*UI);
6886 }
6887 }
6888
6889 return false;
6890}
6915 return N->getOperand(0); 6916 } 6917 break; 6918 case ISD::FDIV: { 6919 assert(TM.Options.UnsafeFPMath && 6920 "Reciprocal estimates require UnsafeFPMath"); 6921 6922 if (N->getOperand(1).getOpcode() == ISD::FSQRT) { 6923 SDValue RV = 6924 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0), DCI); 6925 if (RV.getNode() != 0) { 6926 DCI.AddToWorklist(RV.getNode()); 6927 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6928 N->getOperand(0), RV); 6929 } 6930 } else if (N->getOperand(1).getOpcode() == ISD::FP_EXTEND && 6931 N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { 6932 SDValue RV = 6933 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), 6934 DCI); 6935 if (RV.getNode() != 0) { 6936 DCI.AddToWorklist(RV.getNode()); 6937 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N->getOperand(1)), 6938 N->getValueType(0), RV); 6939 DCI.AddToWorklist(RV.getNode()); 6940 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6941 N->getOperand(0), RV); 6942 } 6943 } else if (N->getOperand(1).getOpcode() == ISD::FP_ROUND && 6944 N->getOperand(1).getOperand(0).getOpcode() == ISD::FSQRT) { 6945 SDValue RV = 6946 DAGCombineFastRecipFSQRT(N->getOperand(1).getOperand(0).getOperand(0), 6947 DCI); 6948 if (RV.getNode() != 0) { 6949 DCI.AddToWorklist(RV.getNode()); 6950 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N->getOperand(1)), 6951 N->getValueType(0), RV, 6952 N->getOperand(1).getOperand(1)); 6953 DCI.AddToWorklist(RV.getNode()); 6954 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6955 N->getOperand(0), RV); 6956 } 6957 } 6958 6959 SDValue RV = DAGCombineFastRecip(N->getOperand(1), DCI); 6960 if (RV.getNode() != 0) { 6961 DCI.AddToWorklist(RV.getNode()); 6962 return DAG.getNode(ISD::FMUL, dl, N->getValueType(0), 6963 N->getOperand(0), RV); 6964 } 6965 6966 } 6967 break; 6968 case ISD::FSQRT: { 6969 assert(TM.Options.UnsafeFPMath && 6970 "Reciprocal estimates require UnsafeFPMath"); 6971 6972 // Compute this as 1/(1/sqrt(X)), which is the reciprocal of the 6973 // reciprocal sqrt. 6974 SDValue RV = DAGCombineFastRecipFSQRT(N->getOperand(0), DCI); 6975 if (RV.getNode() != 0) { 6976 DCI.AddToWorklist(RV.getNode()); 6977 RV = DAGCombineFastRecip(RV, DCI); 6978 if (RV.getNode() != 0) 6979 return RV; 6980 } 6981 6982 } 6983 break; 6984 case ISD::SINT_TO_FP: 6985 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) { 6986 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) { 6987 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores. 6988 // We allow the src/dst to be either f32/f64, but the intermediate 6989 // type must be i64. 6990 if (N->getOperand(0).getValueType() == MVT::i64 && 6991 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { 6992 SDValue Val = N->getOperand(0).getOperand(0); 6993 if (Val.getValueType() == MVT::f32) { 6994 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val); 6995 DCI.AddToWorklist(Val.getNode()); 6996 } 6997 6998 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val); 6999 DCI.AddToWorklist(Val.getNode()); 7000 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val); 7001 DCI.AddToWorklist(Val.getNode()); 7002 if (N->getValueType(0) == MVT::f32) { 7003 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 7004 DAG.getIntPtrConstant(0)); 7005 DCI.AddToWorklist(Val.getNode()); 7006 } 7007 return Val; 7008 } else if (N->getOperand(0).getValueType() == MVT::i32) { 7009 // If the intermediate type is i32, we can avoid the load/store here 7010 // too. 
7011 }
7012 }
7013 }
7014 break;
7015 case ISD::STORE:
7016 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
7017 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
7018 !cast<StoreSDNode>(N)->isTruncatingStore() &&
7019 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
7020 N->getOperand(1).getValueType() == MVT::i32 &&
7021 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
7022 SDValue Val = N->getOperand(1).getOperand(0);
7023 if (Val.getValueType() == MVT::f32) {
7024 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
7025 DCI.AddToWorklist(Val.getNode());
7026 }
7027 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
7028 DCI.AddToWorklist(Val.getNode());
7029
7030 SDValue Ops[] = {
7031 N->getOperand(0), Val, N->getOperand(2),
7032 DAG.getValueType(N->getOperand(1).getValueType())
7033 };
7034
7035 Val = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7036 DAG.getVTList(MVT::Other), Ops, array_lengthof(Ops),
7037 cast<StoreSDNode>(N)->getMemoryVT(),
7038 cast<StoreSDNode>(N)->getMemOperand());
7039 DCI.AddToWorklist(Val.getNode());
7040 return Val;
7041 }
7042
7043 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
7044 if (cast<StoreSDNode>(N)->isUnindexed() &&
7045 N->getOperand(1).getOpcode() == ISD::BSWAP &&
7046 N->getOperand(1).getNode()->hasOneUse() &&
7047 (N->getOperand(1).getValueType() == MVT::i32 ||
7048 N->getOperand(1).getValueType() == MVT::i16 ||
7049 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() &&
7050 TM.getSubtarget<PPCSubtarget>().isPPC64() &&
7051 N->getOperand(1).getValueType() == MVT::i64))) {
7052 SDValue BSwapOp = N->getOperand(1).getOperand(0);
7053 // Do an any-extend to 32-bits if this is a half-word input.
7054 if (BSwapOp.getValueType() == MVT::i16)
7055 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
7056
7057 SDValue Ops[] = {
7058 N->getOperand(0), BSwapOp, N->getOperand(2),
7059 DAG.getValueType(N->getOperand(1).getValueType())
7060 };
7061 return
7062 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
7063 Ops, array_lengthof(Ops),
7064 cast<StoreSDNode>(N)->getMemoryVT(),
7065 cast<StoreSDNode>(N)->getMemOperand());
7066 }
7067 break;
7068 case ISD::LOAD: {
7069 LoadSDNode *LD = cast<LoadSDNode>(N);
7070 EVT VT = LD->getValueType(0);
7071 Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
7072 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
7073 if (ISD::isNON_EXTLoad(N) && VT.isVector() &&
7074 TM.getSubtarget<PPCSubtarget>().hasAltivec() &&
7075 DCI.getDAGCombineLevel() == AfterLegalizeTypes &&
7076 LD->getAlignment() < ABIAlignment) {
7077 // This is a type-legal unaligned Altivec load.
7078 SDValue Chain = LD->getChain();
7079 SDValue Ptr = LD->getBasePtr();
7080
7081 // This implements the loading of unaligned vectors as described in
7082 // the venerable Apple Velocity Engine overview. Specifically:
7083 // https://developer.apple.com/hardwaredrivers/ve/alignment.html
7084 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
7085 //
7086 // The general idea is to expand a sequence of one or more unaligned
7087 // loads into an alignment-based permutation-control instruction (lvsl),
7088 // a series of regular vector loads (which always truncate their
7089 // input address to an aligned address), and a series of permutations.
7090 // The results of these permutations are the requested loaded values.
7091 // The trick is that the last "extra" load is not taken from the address
7092 // you might suspect (sizeof(vector) bytes after the last requested
7093 // load), but rather sizeof(vector) - 1 bytes after the last
7094 // requested vector. The point of this is to avoid a page fault if the
7095 // base address happened to be aligned. This works because if the base
7096 // address is aligned, then adding less than a full vector length will
7097 // cause the last vector in the sequence to be (re)loaded. Otherwise,
7098 // the next vector will be fetched as you might suspect is necessary.
7099
7100 // We might be able to reuse the permutation generation from
7101 // a different base address offset from this one by an aligned amount.
7102 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
7103 // optimization later.
7104 SDValue PermCntl = BuildIntrinsicOp(Intrinsic::ppc_altivec_lvsl, Ptr,
7105 DAG, dl, MVT::v16i8);
7106
7107 // Refine the alignment of the original load (a "new" load created here,
7108 // identical to the first except for the alignment, would be
7109 // merged with the existing node regardless).
7110 MachineFunction &MF = DAG.getMachineFunction();
7111 MachineMemOperand *MMO =
7112 MF.getMachineMemOperand(LD->getPointerInfo(),
7113 LD->getMemOperand()->getFlags(),
7114 LD->getMemoryVT().getStoreSize(),
7115 ABIAlignment);
7116 LD->refineAlignment(MMO);
7117 SDValue BaseLoad = SDValue(LD, 0);
7118
7119 // Note that the value of IncOffset (which is provided to the next
7120 // load's pointer info offset value, and thus used to calculate the
7121 // alignment), and the value of IncValue (which is actually used to
7122 // increment the pointer value) are different! This is because we
7123 // require the next load to appear to be aligned, even though it
7124 // is actually offset from the base pointer by a lesser amount.
7125 int IncOffset = VT.getSizeInBits() / 8;
7126 int IncValue = IncOffset;
7127
7128 // Walk (both up and down) the chain looking for another load at the real
7129 // (aligned) offset (the alignment of the other load does not matter in
7130 // this case). If found, then do not use the offset reduction trick, as
7131 // that will prevent the loads from being later combined (as they would
7132 // otherwise be duplicates).
7133 if (!findConsecutiveLoad(LD, DAG))
7134 --IncValue;
7135
7136 SDValue Increment = DAG.getConstant(IncValue, getPointerTy());
7137 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
7138
7139 SDValue ExtraLoad =
7140 DAG.getLoad(VT, dl, Chain, Ptr,
7141 LD->getPointerInfo().getWithOffset(IncOffset),
7142 LD->isVolatile(), LD->isNonTemporal(),
7143 LD->isInvariant(), ABIAlignment);
7144
7145 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7146 BaseLoad.getValue(1), ExtraLoad.getValue(1));
7147
7148 if (BaseLoad.getValueType() != MVT::v4i32)
7149 BaseLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, BaseLoad);
7150
7151 if (ExtraLoad.getValueType() != MVT::v4i32)
7152 ExtraLoad = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ExtraLoad);
7153
7154 SDValue Perm = BuildIntrinsicOp(Intrinsic::ppc_altivec_vperm,
7155 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
7156
7157 if (VT != MVT::v4i32)
7158 Perm = DAG.getNode(ISD::BITCAST, dl, VT, Perm);
7159
7160 // Now we need to be really careful about how we update the users of the
7161 // original load.
We cannot just call DCI.CombineTo (or
7162 // DAG.ReplaceAllUsesWith for that matter), because the load still has
7163 // uses created here (the permutation for example) that need to stay.
7164 SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
7165 while (UI != UE) {
7166 SDUse &Use = UI.getUse();
7167 SDNode *User = *UI;
7168 // Note: BaseLoad is checked here because it might not be N, but a
7169 // bitcast of N.
7170 if (User == Perm.getNode() || User == BaseLoad.getNode() ||
7171 User == TF.getNode() || Use.getResNo() > 1) {
7172 ++UI;
7173 continue;
7174 }
7175
7176 SDValue To = Use.getResNo() ? TF : Perm;
7177 ++UI;
7178
7179 SmallVector<SDValue, 8> Ops;
7180 for (SDNode::op_iterator O = User->op_begin(),
7181 OE = User->op_end(); O != OE; ++O) {
7182 if (*O == Use)
7183 Ops.push_back(To);
7184 else
7185 Ops.push_back(*O);
7186 }
7187
7188 DAG.UpdateNodeOperands(User, Ops.data(), Ops.size());
7189 }
7190
7191 return SDValue(N, 0);
7192 }
7193 }
7194 break;
7195 case ISD::INTRINSIC_WO_CHAIN:
7196 if (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() ==
7197 Intrinsic::ppc_altivec_lvsl &&
7198 N->getOperand(1)->getOpcode() == ISD::ADD) {
7199 SDValue Add = N->getOperand(1);
7200
7201 if (DAG.MaskedValueIsZero(Add->getOperand(1),
7202 APInt::getAllOnesValue(4 /* 16 byte alignment */).zext(
7203 Add.getValueType().getScalarType().getSizeInBits()))) {
7204 SDNode *BasePtr = Add->getOperand(0).getNode();
7205 for (SDNode::use_iterator UI = BasePtr->use_begin(),
7206 UE = BasePtr->use_end(); UI != UE; ++UI) {
7207 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
7208 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
7209 Intrinsic::ppc_altivec_lvsl) {
7210 // We've found another LVSL, and this address is an aligned
7211 // multiple of that one. The results will be the same, so use the
7212 // one we've just found instead.
7213
7214 return SDValue(*UI, 0);
7215 }
7216 }
7217 }
7218 } break;
7219 case ISD::BSWAP:
7220 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
7221 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
7222 N->getOperand(0).hasOneUse() &&
7223 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
7224 (TM.getSubtarget<PPCSubtarget>().hasLDBRX() &&
7225 TM.getSubtarget<PPCSubtarget>().isPPC64() &&
7226 N->getValueType(0) == MVT::i64))) {
7227 SDValue Load = N->getOperand(0);
7228 LoadSDNode *LD = cast<LoadSDNode>(Load);
7229 // Create the byte-swapping load.
7230 SDValue Ops[] = {
7231 LD->getChain(), // Chain
7232 LD->getBasePtr(), // Ptr
7233 DAG.getValueType(N->getValueType(0)) // VT
7234 };
7235 SDValue BSLoad =
7236 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
7237 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
7238 MVT::i64 : MVT::i32, MVT::Other),
7239 Ops, 3, LD->getMemoryVT(), LD->getMemOperand());
7240
7241 // If this is an i16 load, insert the truncate.
7242 SDValue ResVal = BSLoad;
7243 if (N->getValueType(0) == MVT::i16)
7244 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
7245
7246 // First, combine the bswap away. This makes the value produced by the
7247 // load dead.
7248 DCI.CombineTo(N, ResVal);
7249
7250 // Next, combine the load away; we give it a bogus result value but a real
7251 // chain result. The result value is dead because the bswap is dead.
7252 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
7253
7254 // Return N so it doesn't get rechecked!
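// (By convention, returning the node itself signals to DAGCombiner that N
// was already rewritten by the CombineTo calls above and needs no further
// replacement.)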
7255 return SDValue(N, 0); 7256 } 7257 7258 break; 7259 case PPCISD::VCMP: { 7260 // If a VCMPo node already exists with exactly the same operands as this 7261 // node, use its result instead of this node (VCMPo computes both a CR6 and 7262 // a normal output). 7263 // 7264 if (!N->getOperand(0).hasOneUse() && 7265 !N->getOperand(1).hasOneUse() && 7266 !N->getOperand(2).hasOneUse()) { 7267 7268 // Scan all of the users of the LHS, looking for VCMPo's that match. 7269 SDNode *VCMPoNode = 0; 7270 7271 SDNode *LHSN = N->getOperand(0).getNode(); 7272 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end(); 7273 UI != E; ++UI) 7274 if (UI->getOpcode() == PPCISD::VCMPo && 7275 UI->getOperand(1) == N->getOperand(1) && 7276 UI->getOperand(2) == N->getOperand(2) && 7277 UI->getOperand(0) == N->getOperand(0)) { 7278 VCMPoNode = *UI; 7279 break; 7280 } 7281 7282 // If there is no VCMPo node, or if the flag value has a single use, don't 7283 // transform this. 7284 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1)) 7285 break; 7286 7287 // Look at the (necessarily single) use of the flag value. If it has a 7288 // chain, this transformation is more complex. Note that multiple things 7289 // could use the value result, which we should ignore. 7290 SDNode *FlagUser = 0; 7291 for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 7292 FlagUser == 0; ++UI) { 7293 assert(UI != VCMPoNode->use_end() && "Didn't find user!"); 7294 SDNode *User = *UI; 7295 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { 7296 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { 7297 FlagUser = User; 7298 break; 7299 } 7300 } 7301 } 7302 7303 // If the user is a MFOCRF instruction, we know this is safe. 7304 // Otherwise we give up for right now. 7305 if (FlagUser->getOpcode() == PPCISD::MFOCRF) 7306 return SDValue(VCMPoNode, 0); 7307 } 7308 break; 7309 } 7310 case ISD::BR_CC: { 7311 // If this is a branch on an altivec predicate comparison, lower this so 7312 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This 7313 // lowering is done pre-legalize, because the legalizer lowers the predicate 7314 // compare down to code that is difficult to reassemble. 7315 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 7316 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); 7317 7318 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero 7319 // value. If so, pass-through the AND to get to the intrinsic. 7320 if (LHS.getOpcode() == ISD::AND && 7321 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN && 7322 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() == 7323 Intrinsic::ppc_is_decremented_ctr_nonzero && 7324 isa<ConstantSDNode>(LHS.getOperand(1)) && 7325 !cast<ConstantSDNode>(LHS.getOperand(1))->getConstantIntValue()-> 7326 isZero()) 7327 LHS = LHS.getOperand(0); 7328 7329 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN && 7330 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 7331 Intrinsic::ppc_is_decremented_ctr_nonzero && 7332 isa<ConstantSDNode>(RHS)) { 7333 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && 7334 "Counter decrement comparison is not EQ or NE"); 7335 7336 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 7337 bool isBDNZ = (CC == ISD::SETEQ && Val) || 7338 (CC == ISD::SETNE && !Val); 7339 7340 // We now need to make the intrinsic dead (it cannot be instruction 7341 // selected). 
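// Concretely, the call below rewires every user of the intrinsic's chain
// result (LHS.getValue(1)) to the intrinsic's incoming chain; once the
// BR_CC is replaced by the BDNZ/BDZ node returned here, nothing refers to
// the intrinsic and it becomes dead.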
7342 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0)); 7343 assert(LHS.getNode()->hasOneUse() && 7344 "Counter decrement has more than one use"); 7345 7346 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other, 7347 N->getOperand(0), N->getOperand(4)); 7348 } 7349 7350 int CompareOpc; 7351 bool isDot; 7352 7353 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 7354 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) && 7355 getAltivecCompareInfo(LHS, CompareOpc, isDot)) { 7356 assert(isDot && "Can't compare against a vector result!"); 7357 7358 // If this is a comparison against something other than 0/1, then we know 7359 // that the condition is never/always true. 7360 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue(); 7361 if (Val != 0 && Val != 1) { 7362 if (CC == ISD::SETEQ) // Cond never true, remove branch. 7363 return N->getOperand(0); 7364 // Always !=, turn it into an unconditional branch. 7365 return DAG.getNode(ISD::BR, dl, MVT::Other, 7366 N->getOperand(0), N->getOperand(4)); 7367 } 7368 7369 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0); 7370 7371 // Create the PPCISD altivec 'dot' comparison node. 7372 SDValue Ops[] = { 7373 LHS.getOperand(2), // LHS of compare 7374 LHS.getOperand(3), // RHS of compare 7375 DAG.getConstant(CompareOpc, MVT::i32) 7376 }; 7377 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue }; 7378 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3); 7379 7380 // Unpack the result based on how the target uses it. 7381 PPC::Predicate CompOpc; 7382 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) { 7383 default: // Can't happen, don't crash on invalid number though. 7384 case 0: // Branch on the value of the EQ bit of CR6. 7385 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE; 7386 break; 7387 case 1: // Branch on the inverted value of the EQ bit of CR6. 7388 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ; 7389 break; 7390 case 2: // Branch on the value of the LT bit of CR6. 7391 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE; 7392 break; 7393 case 3: // Branch on the inverted value of the LT bit of CR6. 7394 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT; 7395 break; 7396 } 7397 7398 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0), 7399 DAG.getConstant(CompOpc, MVT::i32), 7400 DAG.getRegister(PPC::CR6, MVT::i32), 7401 N->getOperand(4), CompNode.getValue(1)); 7402 } 7403 break; 7404 } 7405 } 7406 7407 return SDValue(); 7408} 7409 7410//===----------------------------------------------------------------------===// 7411// Inline Assembly Support 7412//===----------------------------------------------------------------------===// 7413 7414void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 7415 APInt &KnownZero, 7416 APInt &KnownOne, 7417 const SelectionDAG &DAG, 7418 unsigned Depth) const { 7419 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0); 7420 switch (Op.getOpcode()) { 7421 default: break; 7422 case PPCISD::LBRX: { 7423 // lhbrx is known to have the top bits cleared out. 
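// For example, an i16 LBRX (lhbrx) zero-extends the byte-reversed halfword
// into a 32-bit result, so bits 16..31 -- the 0xFFFF0000 mask set below --
// are known to be zero.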
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}


/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
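  // For example (illustrative only, not from the original source), given
  //   asm("fadds %0, %1, %2" : "=f"(r) : "f"(a), "f"(b))
  // with float operands, the 'f' case below reports CW_Register, steering
  // the operands toward the floating-point register classes.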
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    case 'f':
      if (VT == MVT::f32 || VT == MVT::i32)
        return std::make_pair(0U, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return std::make_pair(0U, &PPC::F8RCRegClass);
      break;
    case 'v':
      return std::make_pair(0U, &PPC::VRRCRegClass);
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}


/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0,0);

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    unsigned Value = CST->getZExtValue();
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
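      // (Illustrative, not in the original source: an inline-asm operand such
      // as "N"(16) passes the power-of-two check below, while "N"(12) does
      // not and so is left out of Ops.)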
      if ((int)Value > 0 && isPowerOf2_32(Value))
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r,
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = PPCSubTarget.isPPC64();
  bool isDarwinABI = PPCSubTarget.isDarwinABI();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
      DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI),
                      isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
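  // (Illustrative note, not from the original source: getReturnAddrFrameIndex
  // hands back a frame index for the link-register save slot, which sits at a
  // fixed, ABI-defined offset from the stack pointer -- e.g. 16(r1) on the
  // 64-bit ABIs -- so the return address is simply loaded from there.)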
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction()->getAttributes().hasAttribute(
        AttributeSet::FunctionIndex, Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false,
                            false, 0);
  return FrameAddr;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero, there is no need to check it
/// against the alignment requirement, probably because the source does not
/// need to be loaded. If 'IsMemset' is true, that means it's expanding a
/// memset. If 'ZeroMemset' is true, that means it's a memset of zero.
/// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
/// not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  return PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
}

bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                      bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.getSimpleVT().isVector())
    return false;

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

/// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
/// a pair of mul and add instructions.
/// fmuladd intrinsics will be expanded to FMAs when this method returns true
/// (and FMAs are legal), otherwise fmuladd is expanded to mul + add.
bool PPCTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
  case MVT::v4f32:
    return true;
  default:
    break;
  }

  return false;
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref)
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}
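// (Illustrative note, not in the original source: Sched::ILP selects the
// pre-RA list-scheduling heuristic that favors exposing instruction-level
// parallelism over minimizing register pressure, which generally suits the
// multi-issue PPC cores this backend targets; -disable-ppc-ilp-pref falls
// back to the target-independent preference.)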