SystemZISelLowering.cpp revision f6ea5e0d8007234fc74c1ff6ac2c3ca316c41d92
//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

using namespace llvm;

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  addRegisterClass(MVT::i32,  &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64,  &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32,  &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64,  &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND).
      setOperationAction(ISD::SETCC, VT, Expand);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }
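
  // Note (illustration): with the actions above, the DAG legalizer rewrites
  // (setcc x, y, lt) as (select_cc x, y, 1, 0, lt) and (select c, a, b) as
  // (select_cc c, 0, a, b, ne), so lowerSELECT_CC below is the single place
  // that turns all of these forms into a comparison plus a SELECT_CCMASK.
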
  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible and a wider multiplication otherwise.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // The architecture has 32-bit SMUL_LOHI and UMUL_LOHI (MR and MLR),
  // but they aren't really worth using. There is no 64-bit SMUL_LOHI,
  // but there is a 64-bit UMUL_LOHI: MLGR.
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);
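
  // For illustration: MLGR multiplies the odd register of an even/odd
  // 64-bit pair by a second operand, leaving the high half of the 128-bit
  // product in the even register and the low half in the odd register.
  // lowerUMUL_LOHI below maps those halves onto UMUL_LOHI's two results.
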
  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool,     PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress,    PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress,     PtrVT, Custom);
  setOperationAction(ISD::JumpTable,        PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY,  MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;
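
  // Note (illustration): with these limits at 0, target-independent code
  // never expands a memcpy into load/store pairs, so every copy reaches
  // the target's SelectionDAG memcpy hook, which is expected to emit MVC
  // (or fall back to a library call) instead.
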
  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
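
// For illustration, an inline-asm use from C code such as
//   asm("lgr %0,%1" : "=d"(Dst) : "d"(Src));
// with 64-bit operands reaches getRegForInlineAsmConstraint above with
// Constraint == "d" and VT == MVT::i64, selecting GR64BitRegClass.
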
void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}
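
// For example, an i32 argument that the calling convention sign-extends
// into a 64-bit GPR arrives with LocVT == i64, ValVT == i32 and
// LocInfo == SExt: the code above wraps it in an AssertSext and then
// truncates it back to i32.
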
// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getLocVT(). The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter. Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }
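
  // All fixed arguments have now been collected. (Per the ELF ABI that
  // CC_SystemZ implements, the first few integer arguments arrive in
  // r2-r6 and the first few FP arguments in f0, f2, f4 and f6; anything
  // else lives in the caller's frame and takes the isMemLoc() path above.)
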
  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be. The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots. (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // SystemZ target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                               DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot. Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  SDValue Glue;
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
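
  // Note (illustration): the glue built above and below ties the argument
  // CopyToReg nodes, the CALL, the CALLSEQ_END and the result CopyFromReg
  // nodes into one sequence, so the scheduler cannot separate the register
  // setup from the call or the result copies from its return.
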
  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison. Return the condition code mask for
// a branch on true. In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones. In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}
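
// For example, CCMaskForCondCode(ISD::SETUGT) returns
// CCMASK_CMP_UO | CCMASK_CMP_GT. For integer comparisons, emitCmp below
// strips the UO bit and switches to an unsigned comparison instruction;
// for floating-point comparisons the bit keeps its "or unordered" meaning.
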
// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    if (IsUnsigned)
      Value &= Mask;
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (SignedValue == -1 && CCMask == SystemZ::CCMASK_CMP_GT)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}
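
// Worked example: a signed byte test "sextload i8 < 0" has no direct
// signed-immediate compare, so adjustSubwordCmp rewrites it as the
// unsigned test "zextload i8 > 127" (a CLI), which checks the same
// sign bit of the loaded byte.
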
// Return true if a comparison described by CCMask, CmpOp0 and CmpOp1
// is an equality comparison that is better implemented using unsigned
// rather than signed comparison instructions.
static bool preferUnsignedComparison(SelectionDAG &DAG, SDValue CmpOp0,
                                     SDValue CmpOp1, unsigned CCMask) {
  // The test must be for equality or inequality.
  if (CCMask != SystemZ::CCMASK_CMP_EQ && CCMask != SystemZ::CCMASK_CMP_NE)
    return false;

  if (CmpOp1.getOpcode() == ISD::Constant) {
    uint64_t Value = cast<ConstantSDNode>(CmpOp1)->getSExtValue();

    // If we're comparing with memory, prefer unsigned comparisons for
    // values that are in the unsigned 16-bit range but not the signed
    // 16-bit range. We want to use CLFHSI and CLGHSI.
    if (CmpOp0.hasOneUse() &&
        ISD::isNormalLoad(CmpOp0.getNode()) &&
        (Value >= 32768 && Value < 65536))
      return true;

    // Use unsigned comparisons for values that are in the CLGFI range
    // but not in the CGFI range.
    if (CmpOp0.getValueType() == MVT::i64 && (Value >> 31) == 1)
      return true;

    return false;
  }

  // Prefer CL for zero-extended loads.
  if (CmpOp1.getOpcode() == ISD::ZERO_EXTEND ||
      ISD::isZEXTLoad(CmpOp1.getNode()))
    return true;

  // ...and for "in-register" zero extensions.
  if (CmpOp1.getOpcode() == ISD::AND && CmpOp1.getValueType() == MVT::i64) {
    SDValue Mask = CmpOp1.getOperand(1);
    if (Mask.getOpcode() == ISD::Constant &&
        cast<ConstantSDNode>(Mask)->getZExtValue() == 0xffffffff)
      return true;
  }

  return false;
}

// Return a target node that compares CmpOp0 and CmpOp1. Set CCMask to the
// 4-bit condition-code mask for CC.
static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode CC, unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(CC);
  if (!CmpOp0.getValueType().isFloatingPoint()) {
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCMask &= ~SystemZ::CCMASK_CMP_UO;
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    if (preferUnsignedComparison(DAG, CmpOp0, CmpOp1, CCMask))
      IsUnsigned = true;
  }

  SDLoc DL(CmpOp0);
  return DAG.getNode((IsUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
                     DL, MVT::Glue, CmpOp0, CmpOp1);
}
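
// For example, an i64 (setcc x, y, ult) reaches emitCmp with
// CCMask == CCMASK_CMP_UO | CCMASK_CMP_LT; the UO bit is cleared,
// IsUnsigned becomes true, and the node built is a UCMP (logical
// compare) whose user then branches or selects on CCMASK_CMP_LT.
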
// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1. Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
                             unsigned Extend, unsigned Opcode,
                             SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
                               SDValue(In128, 0), Op1);
  bool Is32Bit = is32Bit(VT);
  SDValue SubReg0 = DAG.getTargetConstant(SystemZ::even128(Is32Bit), VT);
  SDValue SubReg1 = DAG.getTargetConstant(SystemZ::odd128(Is32Bit), VT);
  SDNode *Reg0 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg0);
  SDNode *Reg1 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg1);
  Even = SDValue(Reg0, 0);
  Odd = SDValue(Reg1, 0);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  unsigned CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Chain, DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  unsigned CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask);

  SmallVector<SDValue, 4> Ops;
  Ops.push_back(TrueOp);
  Ops.push_back(FalseOp);
  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
  Ops.push_back(Flags);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Make sure that the offset is aligned to a halfword. If it isn't,
    // create an "anchor" at the previous 12-bit boundary.
    // FIXME check whether there is a better way of handling this.
    if (Offset & 1) {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                          Offset & ~uint64_t(0xfff));
      Offset &= 0xfff;
    } else {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
      Offset = 0;
    }
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }
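
  // Example of the anchor trick above: a PC32DBL reference to GV+4097
  // cannot be encoded directly (LARL offsets must be halfword-aligned),
  // so it becomes LARL of the anchor GV+4096 plus an explicit add of 1.
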
  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy();
  TLSModel::Model model = TM.getTLSModel(GV);

  if (model != TLSModel::LocalExec)
    llvm_unreachable("only local-exec TLS mode supported");

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(0, MVT::i32));
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(1, MVT::i32));
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, PtrVT));
  SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);

  // Get the offset of GA from the thread pointer.
  SystemZConstantPoolValue *CPV =
    SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

  // Force the offset into the constant pool and load it from there.
  SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
  SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
                               CPAddr, MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy();
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy();

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment(), CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
  SDValue Shift32 = DAG.getConstant(32, MVT::i64);
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
    SDValue Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, Shift32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Shift);
    SDNode *Out = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                     MVT::f32, Out64, SubReg32);
    return SDValue(Out, 0);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDNode *In64 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                      MVT::f64, SDValue(U64, 0), In, SubReg32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, SDValue(In64, 0));
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, Shift32);
    SDValue Out = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
    return Out;
  }
  llvm_unreachable("Unexpected bitcast combination");
}

SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy();

  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };
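
  // These fields mirror the s390x va_list structure, which per the ELF
  // ABI is roughly:
  //   struct __va_list {
  //     long __gpr;                 // next GPR argument number
  //     long __fpr;                 // next FPR argument number
  //     void *__overflow_arg_area;  // first stack (overflow) vararg
  //     void *__reg_save_area;      // caller-allocated register save area
  //   };
  // with each field occupying 8 bytes, matching the stores below.
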
  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset),
                             false, false, 0);
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
}

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
                       /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}

SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc DL(Op);

  unsigned SPReg = getStackPointerRegisterToSaveRestore();

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // Get the new stack pointer value.
  SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);

  // Copy the new stack pointer back.
  Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments. We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  assert(!is32Bit(VT) && "Only support 64-bit UMUL_LOHI");

  // UMUL_LOHI64 returns the low result in the odd register and the high
  // result in the even register. UMUL_LOHI is defined to return the
  // low half first, so the results are in reverse order.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Opcode;

  // We use DSGF for 32-bit division.
  if (is32Bit(VT)) {
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
    Opcode = SystemZISD::SDIVREM32;
  } else if (DAG.ComputeNumSignBits(Op1) > 32) {
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
    Opcode = SystemZISD::SDIVREM32;
  } else
    Opcode = SystemZISD::SDIVREM64;

  // DSG(F) takes a 64-bit dividend, so the even register in the GR128
  // input is "don't care". The instruction returns the remainder in
  // the even register and the quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
                   Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) uses a double-width dividend, so we need to clear the even
  // register in the GR128 input. The instruction returns the remainder
  // in the even register and the quotient in the odd register.
  SDValue Ops[2];
  if (is32Bit(VT))
    lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else
    lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  APInt KnownZero[2], KnownOne[2];
  DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
  DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero. They are the low and high operands respectively.
  uint64_t Masks[] = { KnownZero[0].getZExtValue(),
                       KnownZero[1].getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits. We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1));
    uint64_t Mask = MaskNode->getZExtValue() | Masks[High];
    if ((Mask >> 32) == 0xffffffff)
      HighOp = HighOp.getOperand(0);
  }
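
  // Note (illustration): a 32-bit operation such as LR or LHI changes only
  // the low 32 bits of a 64-bit GPR, so ORing a value whose high word is
  // zero into one whose low word is zero amounts to "insert the low 32
  // bits", which the INSERT_SUBREG below expresses.
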
  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg. The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
  SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                      MVT::i64, HighOp, Low32, SubReg32);
  return SDValue(Result, 0);
}

// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG,
                                                unsigned Opcode) const {
  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, WideVT), BitShift);

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance. (This shift
  // can be folded if the source is constant.) For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, WideVT));

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             array_lengthof(Ops),
                                             NarrowVT, MMO);
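
  // Worked example (big-endian): for an i8 field at byte offset 1 within
  // its containing word, BitShift is 8, so rotating the loaded word left
  // by 8 brings that byte into the top 8 bits of the GR32 for the inner
  // loop; NegBitShift (-8, i.e. 24 mod 32) rotates it back afterwards.
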
  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, 2, DL);
}

// Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two
// into a fullword ATOMIC_CMP_SWAPW operation.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());

  // We have native support for 32-bit compare and swap.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, WideVT), BitShift);
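
  // Note: unlike the ATOMIC_LOADW_* lowering above, CmpVal and SwapVal
  // are passed through unshifted here; the expansion of the
  // ATOMIC_CMP_SWAPW pseudo is expected to rotate the containing word
  // and position the field itself.
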
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                    NegBitShift, DAG.getConstant(BitSize, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
                                             VTList, Ops, array_lengthof(Ops),
                                             NarrowVT, MMO);
  return AtomicOp;
}

SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
                            SystemZ::R15D, Op.getValueType());
}

SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
                          SystemZ::R15D, Op.getOperand(1));
}

SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::BR_CC:
    return lowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return lowerSELECT_CC(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
  case ISD::JumpTable:
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::VACOPY:
    return lowerVACOPY(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::UMUL_LOHI:
    return lowerUMUL_LOHI(Op, DAG);
  case ISD::SDIVREM:
    return lowerSDIVREM(Op, DAG);
  case ISD::UDIVREM:
    return lowerUDIVREM(Op, DAG);
  case ISD::OR:
    return lowerOR(Op, DAG);
  case ISD::ATOMIC_SWAP:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
  case ISD::ATOMIC_LOAD_ADD:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
  case ISD::ATOMIC_LOAD_SUB:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
  case ISD::ATOMIC_LOAD_AND:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
  case ISD::ATOMIC_LOAD_OR:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
  case ISD::ATOMIC_LOAD_XOR:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
  case ISD::ATOMIC_LOAD_NAND:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
  case ISD::ATOMIC_LOAD_MIN:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
  case ISD::ATOMIC_LOAD_MAX:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
  case ISD::ATOMIC_LOAD_UMIN:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
  case ISD::ATOMIC_LOAD_UMAX:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
  case ISD::ATOMIC_CMP_SWAP:
    return lowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STACKSAVE:
    return lowerSTACKSAVE(Op, DAG);
  case ISD::STACKRESTORE:
    return lowerSTACKRESTORE(Op, DAG);
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}

const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
  switch (Opcode) {
    OPCODE(RET_FLAG);
    OPCODE(CALL);
    OPCODE(PCREL_WRAPPER);
    OPCODE(CMP);
    OPCODE(UCMP);
    OPCODE(BR_CCMASK);
    OPCODE(SELECT_CCMASK);
    OPCODE(ADJDYNALLOC);
    OPCODE(EXTRACT_ACCESS);
    OPCODE(UMUL_LOHI64);
    OPCODE(SDIVREM64);
    OPCODE(UDIVREM32);
    OPCODE(UDIVREM64);
    OPCODE(MVC);
    OPCODE(ATOMIC_SWAPW);
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_OR);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
  }
  return NULL;
#undef OPCODE
}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Create a new basic block after MBB.
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
  MachineFunction &MF = *MBB->getParent();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
  MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
  return NewMBB;
}

// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
                                          MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB,
                 llvm::next(MachineBasicBlock::iterator(MI)),
                 MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

bool SystemZTargetLowering::
convertPrevCompareToBranch(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator MBBI,
                           unsigned CCMask, MachineBasicBlock *Target) const {
  MachineBasicBlock::iterator Compare = MBBI;
  MachineBasicBlock::iterator Begin = MBB->begin();
  do {
    if (Compare == Begin)
      return false;
    --Compare;
  } while (Compare->isDebugValue());

  const SystemZInstrInfo *TII = TM.getInstrInfo();
  unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(),
                                                  Compare);
  if (!FusedOpcode)
    return false;

  DebugLoc DL = Compare->getDebugLoc();
  BuildMI(*MBB, MBBI, DL, TII->get(FusedOpcode))
    .addOperand(Compare->getOperand(0)).addOperand(Compare->getOperand(1))
    .addImm(CCMask).addMBB(Target);
  Compare->removeFromParent();
  return true;
}

// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
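// The expansion is a triangle: StartMBB branches straight to JoinMBB when
// CCMask holds and otherwise falls through to FalseMBB, and a PHI in
// JoinMBB selects between TrueReg and FalseReg accordingly.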
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned TrueReg = MI->getOperand(1).getReg();
  unsigned FalseReg = MI->getOperand(2).getReg();
  unsigned CCMask = MI->getOperand(3).getImm();
  DebugLoc DL = MI->getDebugLoc();

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  //
  // The original DAG glues comparisons to their uses, both to ensure
  // that no CC-clobbering instructions are inserted between them, and
  // to ensure that comparison results are not reused.  This means that
  // this Select is the sole user of any preceding comparison instruction
  // and that we can try to use a fused compare and branch instead.
  MBB = StartMBB;
  if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB))
    BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  MBB->addSuccessor(JoinMBB);

  //  JoinMBB:
  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
  //  ...
  MBB = JoinMBB;
  BuildMI(*MBB, MBB->begin(), DL, TII->get(SystemZ::PHI), DestReg)
    .addReg(TrueReg).addMBB(StartMBB)
    .addReg(FalseReg).addMBB(FalseMBB);

  MI->eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.
MachineBasicBlock *
SystemZTargetLowering::emitCondStore(MachineInstr *MI,
                                     MachineBasicBlock *MBB,
                                     unsigned StoreOpcode, bool Invert) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();

  MachineOperand Base = MI->getOperand(0);
  int64_t Disp = MI->getOperand(1).getImm();
  unsigned IndexReg = MI->getOperand(2).getReg();
  unsigned SrcReg = MI->getOperand(3).getReg();
  unsigned CCMask = MI->getOperand(4).getImm();
  DebugLoc DL = MI->getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask = CCMask ^ SystemZ::CCMASK_ANY;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  //
  // The original DAG glues comparisons to their uses, both to ensure
  // that no CC-clobbering instructions are inserted between them, and
  // to ensure that comparison results are not reused.  This means that
  // this CondStore is the sole user of any preceding comparison instruction
  // and that we can try to use a fused compare and branch instead.
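  //
  // (Because CCMask was inverted above when Invert is false, the branch to
  // JoinMBB is taken exactly when the store should not happen, in both the
  // normal and inverted forms.)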
  MBB = StartMBB;
  if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB))
    BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   store %SrcReg, %Disp(%Index,%Base)
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  BuildMI(MBB, DL, TII->get(StoreOpcode))
    .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
  MBB->addSuccessor(JoinMBB);

  MI->eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands.  Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *
SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
                                            MachineBasicBlock *MBB,
                                            unsigned BinOpcode,
                                            unsigned BitSize,
                                            bool Invert) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  // Src2 can be a register or immediate.
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI->getOperand(3));
  unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
  DebugLoc DL = MI->getDebugLoc();
  if (IsSubWord)
    BitSize = MI->getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned NewVal = (BinOpcode || IsSubWord ?
                     MRI.createVirtualRegister(RC) : Src2.getReg());
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    unsigned Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
      .addReg(RotatedOldVal).addOperand(Src2);
    if (BitSize < 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
        .addReg(Tmp).addImm(uint32_t(~0u << (32 - BitSize)));
    else if (BitSize == 32)
      // XILF with every bit set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
        .addReg(Tmp).addImm(~uint32_t(0));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      unsigned Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
      .addReg(RotatedOldVal).addOperand(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
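//
// Unlike emitAtomicLoadBinary, the loop body here spans three blocks:
// LoopMBB compares the rotated field with Src2 and branches straight to
// UpdateMBB when the old value should be kept; otherwise UseAltMBB copies
// Src2's field into the rotated word (with RISBG, for partword operations).
// UpdateMBB selects between the two via a PHI, rotates the choice back
// into place and retries the CS until it succeeds.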
MachineBasicBlock *
SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
                                            MachineBasicBlock *MBB,
                                            unsigned CompareOpcode,
                                            unsigned KeepOldMask,
                                            unsigned BitSize) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  unsigned Src2 = MI->getOperand(3).getReg();
  unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
  DebugLoc DL = MI->getDebugLoc();
  if (IsSubWord)
    BitSize = MI->getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned NewVal = MRI.createVirtualRegister(RC);
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  unsigned FusedOpcode = TII->getCompareAndBranch(CompareOpcode);
  if (FusedOpcode)
    BuildMI(MBB, DL, TII->get(FusedOpcode))
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(KeepOldMask).addMBB(UpdateMBB);
  else {
    BuildMI(MBB, DL, TII->get(CompareOpcode))
      .addReg(RotatedOldVal).addReg(Src2);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(KeepOldMask).addMBB(UpdateMBB);
  }
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  //  UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  //  UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
                                          MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);

  // Extract the operands.  Base can be a register or a frame index.
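  // The operand layout mirrors the ATOMIC_CMP_SWAPW node built by
  // lowerATOMIC_CMP_SWAP above: base and displacement for the aligned word,
  // the original compare and swap values, both shift amounts, and the
  // width of the field in bits.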
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  unsigned OrigCmpVal = MI->getOperand(3).getReg();
  unsigned OrigSwapVal = MI->getOperand(4).getReg();
  unsigned BitShift = MI->getOperand(5).getReg();
  unsigned NegBitShift = MI->getOperand(6).getReg();
  int64_t BitSize = MI->getOperand(7).getImm();
  DebugLoc DL = MI->getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned CmpVal = MRI.createVirtualRegister(RC);
  unsigned SwapVal = MRI.createVirtualRegister(RC);
  unsigned StoreVal = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);

  //  StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest = RLL %OldVal, BitSize(%BitShift)
  //                 ^^ The low BitSize bits contain the field
  //                    of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                 ^^ Replace the upper 32-BitSize bits of the
  //                    comparison value with those that we loaded,
  //                    so that we can use a full word comparison.
  //   CRJNE %Dest, %RetryCmpVal, DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CRJ))
    .addReg(Dest).addReg(RetryCmpVal)
    .addImm(MaskNE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  //  SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                 ^^ Replace the upper 32-BitSize bits of the new
  //                    value with those that we loaded.
  //   %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                 ^^ Rotate the new field to its proper position.
  //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Emit an extension from a GR32 or GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".  SubReg is subreg_odd32 when extending a GR32
// and subreg_odd when extending a GR64.
MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
                                  MachineBasicBlock *MBB,
                                  bool ClearEven, unsigned SubReg) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_high);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SubReg);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
SystemZTargetLowering::emitMVCWrapper(MachineInstr *MI,
                                      MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand DestBase = MI->getOperand(0);
  uint64_t DestDisp = MI->getOperand(1).getImm();
  MachineOperand SrcBase = MI->getOperand(2);
  uint64_t SrcDisp = MI->getOperand(3).getImm();
  uint64_t Length = MI->getOperand(4).getImm();

  BuildMI(*MBB, MI, DL, TII->get(SystemZ::MVC))
    .addOperand(DestBase).addImm(DestDisp).addImm(Length)
    .addOperand(SrcBase).addImm(SrcDisp);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::SelectF32:
  case SystemZ::Select64:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8_32:
    return emitCondStore(MI, MBB, SystemZ::STC32, false);
  case SystemZ::CondStore8_32Inv:
    return emitCondStore(MI, MBB, SystemZ::STC32, true);
  case SystemZ::CondStore16_32:
    return emitCondStore(MI, MBB, SystemZ::STH32, false);
  case SystemZ::CondStore16_32Inv:
    return emitCondStore(MI, MBB, SystemZ::STH32, true);
  case SystemZ::CondStore32_32:
    return emitCondStore(MI, MBB, SystemZ::ST32, false);
  case SystemZ::CondStore32_32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST32, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, true);

  case SystemZ::AEXT128_64:
    return emitExt128(MI, MBB, false, SystemZ::subreg_low);
  case SystemZ::ZEXT128_32:
    return emitExt128(MI, MBB, true, SystemZ::subreg_low32);
  case SystemZ::ZEXT128_64:
    return emitExt128(MI, MBB, true, SystemZ::subreg_low);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32);
  case SystemZ::ATOMIC_LOAD_NILH32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32);
  case SystemZ::ATOMIC_LOAD_NILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64);
  case SystemZ::ATOMIC_LOAD_NIHL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64);
  case SystemZ::ATOMIC_LOAD_NIHH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64);
  case SystemZ::ATOMIC_LOAD_NIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32);
  case SystemZ::ATOMIC_LOAD_OILH32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32);
  case SystemZ::ATOMIC_LOAD_OILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64);
  case SystemZ::ATOMIC_LOAD_OIHL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64);
  case SystemZ::ATOMIC_LOAD_OIHH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64);
  case SystemZ::ATOMIC_LOAD_OIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64);
  case SystemZ::ATOMIC_LOAD_XIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILL32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true);
  case SystemZ::ATOMIC_LOAD_NILH32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true);
  case SystemZ::ATOMIC_LOAD_NILF32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::BRC:
    // The original DAG glues comparisons to their uses, both to ensure
    // that no CC-clobbering instructions are inserted between them, and
    // to ensure that comparison results are not reused.  This means that
    // a BRC is the sole user of a preceding comparison and that we can
    // try to use a fused compare and branch instead.
    if (convertPrevCompareToBranch(MBB, MI, MI->getOperand(0).getImm(),
                                   MI->getOperand(1).getMBB()))
      MI->eraseFromParent();
    return MBB;
  case SystemZ::MVCWrapper:
    return emitMVCWrapper(MI, MBB);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}