//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#include <cmath>
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUDiagnosticInfoUnsupported.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"
#include "llvm/ADT/SmallString.h"

using namespace llvm;

SITargetLowering::SITargetLowering(TargetMachine &TM,
                                   const AMDGPUSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  computeRegisterProperties(STI.getRegisterInfo());

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::ADD, MVT::i32, Legal);
  setOperationAction(ISD::ADDC, MVT::i32, Legal);
  setOperationAction(ISD::ADDE, MVT::i32, Legal);
  setOperationAction(ISD::SUBC, MVT::i32, Legal);
  setOperationAction(ISD::SUBE, MVT::i32, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  // We need to custom lower vector stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);

  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);

  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

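  // 64-bit SELECT is lowered by hand into a pair of 32-bit selects (see
  // LowerSELECT below); f64 select reuses that path by being promoted to i64.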
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);

  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v16i8, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v16i16, Expand);
  }

  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);

  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);

  // These should use UDIVREM, so set them to expand.
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
  setOperationAction(ISD::SELECT, MVT::i1, Promote);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);

  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to
  // v4i32.
  for (MVT Vec64 : {MVT::v2i64, MVT::v2f64}) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::UINT_TO_FP);

  // All memory operations. Some folding on the pointer operand is done to
  // help matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  // Flat instructions do not have offsets, and only have the register
  // address.
  return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with the offen bit set, so they are
  // slightly different than the normal addr64 form.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split the immediate into an soffset and an immediate
  // offset, would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r,
    // and 2 * r + i as r + r + i.
    return true;
  default: // Don't allow n * r.
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS: {
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // Assume that we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffers < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor, which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8 bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is a 20-bit byte
      // offset.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }

  case AMDGPUAS::PRIVATE_ADDRESS:
  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS: {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset, but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }
  case AMDGPUAS::FLAT_ADDRESS:
    return isLegalFlatAddressingMode(AM);

  default:
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // but that isn't a simple VT.
  if (!VT.isSimple() || VT == MVT::Other)
    return false;

  // TODO - CI+ supports unaligned memory accesses, but this requires driver
  // support.

  // XXX - The only mention of this in the ISA manual is for LDS direct reads,
  // where the byte address "must be dword aligned". Is it also true for the
  // normal loads and stores?
  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) {
    // ds_read/write_b64 requires 8-byte alignment, but we can do a 4-byte
    // aligned, 8-byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;
    return AlignedBy4;
  }

  // Smaller than dword values must be aligned.
  // FIXME: This should be allowed on CI+.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for the address space here.

  // The default fallback uses the private pointer size as a guess for a type
  // to use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global.
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  if (isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || isa<Constant>(Ptr) ||
      isa<GlobalValue>(Ptr))
    return true;

  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
  return TII->isInlineConstant(Imm);
}

SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         SDLoc SL, SDValue Chain,
                                         unsigned Offset, bool Signed) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  unsigned InputPtrReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);

  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg),
                                       PtrVT);
  SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                            DAG.getConstant(Offset, SL, PtrVT));
  SDValue PtrOffset = DAG.getUNDEF(PtrVT);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  ISD::LoadExtType ExtTy = Signed ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
  if (MemVT.isFloatingPoint())
    ExtTy = ISD::EXTLOAD;

  return DAG.getLoad(ISD::UNINDEXED, ExtTy,
                     VT, SL, Chain, Ptr, PtrOffset, PtrInfo, MemVT,
                     false, // isVolatile
                     true,  // isNonTemporal
                     true,  // isInvariant
                     Align); // Alignment
}

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  if (Subtarget->isAmdHsaOS() && Info->getShaderType() != ShaderType::COMPUTE) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(*Fn, "non-compute shaders with HSA");
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return SDValue();
  }

  // FIXME: We currently assume all calling conventions are kernels.

  SmallVector<ISD::InputArg, 16> Splits;
  BitVector Skipped(Ins.size());

  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];

    // First, check if it's a PS input address.
    if (Info->getShaderType() == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal()) {

      assert((PSInputNum <= 15) && "Too many PS inputs!");

      if (!Arg.Used) {
        // We can safely skip PS inputs.
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->PSInputAddr |= 1 << PSInputNum++;
    }

    // Second, split vertices into their elements.
    if (Info->getShaderType() != ShaderType::COMPUTE && Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }

    } else if (Info->getShaderType() != ShaderType::COMPUTE) {
      Splits.push_back(Arg);
    }
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // At least one interpolation mode must be enabled or else the GPU will hang.
  if (Info->getShaderType() == ShaderType::PIXEL &&
      (Info->PSInputAddr & 0x7F) == 0) {
    Info->PSInputAddr |= 1;
    CCInfo.AllocateReg(AMDGPU::VGPR0);
    CCInfo.AllocateReg(AMDGPU::VGPR1);
  }

  if (Info->getShaderType() == ShaderType::COMPUTE) {
    getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
                            Splits);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info->hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  AnalyzeFormalArguments(CCInfo, Splits);

  SmallVector<SDValue, 16> Chains;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {

    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = Splits[i].VT;
      const unsigned Offset = Subtarget->getExplicitKernelArgOffset() +
                              VA.getLocMemOffset();
      // The first 36 bytes of the input buffer contain information about
      // thread group and global sizes.
      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain,
                                   Offset, Ins[i].Flags.isSExt());
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
          dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16 bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      Info->ABIArgOffset = Offset + MemVT.getStoreSize();
      continue;
    }
    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();

    if (VT == MVT::i64) {
      // For now assume it is a pointer.
      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                     &AMDGPU::SReg_64RegClass);
      Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Copy);
      continue;
    }

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.VT.isVector()) {

      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.

  // Start adding system SGPRs.
  if (Info->hasWorkGroupIDX()) {
    unsigned Reg = Info->addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  } else
    llvm_unreachable("work group id x is always enabled");

  if (Info->hasWorkGroupIDY()) {
    unsigned Reg = Info->addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDZ()) {
    unsigned Reg = Info->addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupInfo()) {
    unsigned Reg = Info->addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in a system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg
      = Info->addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }

  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.

  bool HasStackObjects = MF.getFrameInfo()->hasStackObjects();

  if (ST.isAmdHsaOS()) {
    // TODO: Assume we will spill without optimizations.
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the HSA ABI, this will be the first 4 user SGPR
      // inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue(
          MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info->setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue(
          MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI->reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue, copies will be inserted from the argument
      // to these reserved registers.
      Info->setScratchRSrcReg(ReservedBufferReg);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info->setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects) {
      unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue(
          MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }

  if (Info->hasWorkItemIDX()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  } else
    llvm_unreachable("workitem id x should always be enabled");

  if (Info->hasWorkItemIDY()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkItemIDZ()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Chains.empty())
    return Chain;

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
    MachineInstr *MI, MachineBasicBlock *BB) const {

  switch (MI->getOpcode()) {
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  case AMDGPU::BRANCH:
    return BB;
  }
  return BB;
}

bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
  return true;
}

EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}

MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT) const {
  return MVT::i32;
}

// Answering this is somewhat tricky and depends on the specific device, which
// may have different rates for fma or all f64 operations.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
// regardless of which device (although the number of cycles differs between
// devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full rate devices. Normally, we should prefer selecting v_mad_f32,
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and since it is always full
// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32,
// however, does not support denormals, so we do report fma as faster if we
// have a fast fma device and require denormals.
//
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
    // This is as fast on some subtargets. However, we always have full rate f32
    // mad available, which returns the same result as the separate operations,
    // and which we should prefer over fma. We can't use this if we want to
    // support denormals, so only report this in those cases.
    return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  }
  return SDValue();
}

/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {

  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {

    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}

SDValue SITargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {

  SDLoc SL(Op);
  FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Op);
  unsigned FrameIndex = FINode->getIndex();

  // A FrameIndex node represents a 32-bit offset into scratch memory. If the
  // high bit of a frame index offset were to be set, this would mean that it
  // represented an offset of ~2GB * 64 = ~128GB from the start of the
  // scratch buffer, with 64 being the number of threads per wave.
  //
  // If we know the machine uses less than 128GB of scratch, then we can
  // mark the high bit of the FrameIndex node as known zero, which is
  // important, because it means in most situations we can prove that values
  // derived from FrameIndex nodes are non-negative. This enables us to take
  // advantage of more addressing modes when accessing scratch buffers, since
  // for scratch reads/writes, the register offset must always be positive.
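  // The AssertZext emitted below therefore uses a 31-bit integer type: it
  // asserts that the sign bit of the 32-bit frame index is known to be zero.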

  SDValue TFI = DAG.getTargetFrameIndex(FrameIndex, MVT::i32);
  if (Subtarget->enableHugeScratchBuffer())
    return TFI;

  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, TFI,
                     DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), 31)));
}

/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter. It also switches the branch target with BR if the
/// need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {

  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine.
    SDNode *SetCC = Intr;
    assert(SetCC->getConstantOperandVal(1) == 1);
    assert(cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
           ISD::SETNE);
    Intr = SetCC->getOperand(0).getNode();

  } else {
    // Get the target from BR if we don't negate the condition.
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN);

  // Build the result and
  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  Ops.push_back(BRCOND.getOperand(0));
  Ops.append(Intr->op_begin() + 1, Intr->op_end());
  Ops.push_back(Target);

  // Build the new intrinsic call.
  SDNode *Result = DAG.getNode(
      Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
      DAG.getVTList(Res), Ops).getNode();

  if (BR) {
    // Give the branch instruction our target.
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
    BR = NewBR.getNode();
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers.
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
        Chain, DL,
        CopyToReg->getOperand(1),
        SDValue(Result, i - 1),
        SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain.
  DAG.ReplaceAllUsesOfValueWith(
      SDValue(Intr, Intr->getNumValues() - 1),
      Intr->getOperand(0));

  return Chain;
}

SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);

  if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  SDLoc DL(GSD);
  const GlobalValue *GV = GSD->getGlobal();
  MVT PtrVT = getPointerTy(DAG.getDataLayout(), GSD->getAddressSpace());

  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
  return DAG.getNode(AMDGPUISD::CONST_DATA_PTR, DL, PtrVT, GA);
}

SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
                                   SDValue V) const {
  // We can't use CopyToReg, because MachineCSE won't combine COPY
  // instructions, so we will end up with redundant moves to m0.
  //
  // We can't use S_MOV_B32, because there is no way to specify m0 as the
  // destination register.
  //
  // We have to use them both. MachineCSE will combine all the S_MOV_B32
  // instructions, and the register coalescer will eliminate the extra copies.
  SDNode *M0 = DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, V.getValueType(), V);
  return DAG.getCopyToReg(Chain, DL, DAG.getRegister(AMDGPU::M0, MVT::i32),
                          SDValue(M0, 0), SDValue()); // Glue: a null SDValue
                                                      // creates a glue result.
}

SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                                                 SDValue Op,
                                                 MVT VT,
                                                 unsigned Offset) const {
  SDLoc SL(Op);
  SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL,
                                 DAG.getEntryNode(), Offset, false);
  // The local size values will have the hi 16 bits as zero.
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}

SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_dispatch_ptr:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_PTR), VT);

  case Intrinsic::r600_read_ngroups_x:
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_X, false);
  case Intrinsic::r600_read_ngroups_y:
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_Y, false);
  case Intrinsic::r600_read_ngroups_z:
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_Z, false);
  case Intrinsic::r600_read_global_size_x:
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
  case Intrinsic::r600_read_global_size_y:
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
  case Intrinsic::r600_read_global_size_z:
    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
  case Intrinsic::r600_read_local_size_x:
    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  case Intrinsic::AMDGPU_read_workdim:
    // Really only 2 bits.
    return lowerImplicitZextParam(DAG, Op, MVT::i8,
                                  getImplicitParameterOffset(MFI, GRID_DIM));
  case Intrinsic::r600_read_tgid_x:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT);
  case Intrinsic::r600_read_tgid_y:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT);
  case Intrinsic::r600_read_tgid_z:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT);
  case Intrinsic::r600_read_tidig_x:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT);
  case Intrinsic::r600_read_tidig_y:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT);
  case Intrinsic::r600_read_tidig_z:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT);
  case AMDGPUIntrinsic::SI_load_const: {
    SDValue Ops[] = {
      Op.getOperand(1),
      Op.getOperand(2)
    };

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo(),
        MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant,
        VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case AMDGPUIntrinsic::SI_sample:
    return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG);
  case AMDGPUIntrinsic::SI_sampleb:
    return LowerSampleIntrinsic(AMDGPUISD::SAMPLEB, Op, DAG);
  case AMDGPUIntrinsic::SI_sampled:
    return LowerSampleIntrinsic(AMDGPUISD::SAMPLED, Op, DAG);
  case AMDGPUIntrinsic::SI_samplel:
    return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG);
  case AMDGPUIntrinsic::SI_vs_load_input:
    return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_fract:
  case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
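    // fract(x) is expanded as x - floor(x), which is what the FSUB/FFLOOR
    // combination below computes.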
    return DAG.getNode(ISD::FSUB, DL, VT, Op.getOperand(1),
                       DAG.getNode(ISD::FFLOOR, DL, VT, Op.getOperand(1)));
  case AMDGPUIntrinsic::SI_fs_constant: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
                       DAG.getConstant(2, DL, MVT::i32), // P0
                       Op.getOperand(1), Op.getOperand(2), Glue);
  }
  case AMDGPUIntrinsic::SI_packf16:
    if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef())
      return DAG.getUNDEF(MVT::i32);
    return Op;
  case AMDGPUIntrinsic::SI_fs_interp: {
    SDValue IJ = Op.getOperand(4);
    SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
                            DAG.getConstant(0, DL, MVT::i32));
    SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
                            DAG.getConstant(1, DL, MVT::i32));
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
    SDValue Glue = M0.getValue(1);
    SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL,
                             DAG.getVTList(MVT::f32, MVT::Glue),
                             I, Op.getOperand(1), Op.getOperand(2), Glue);
    Glue = SDValue(P1.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J,
                       Op.getOperand(1), Op.getOperand(2), Glue);
  }
  case Intrinsic::amdgcn_interp_p1: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p2: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = SDValue(M0.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
                       Glue);
  }
  default:
    return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  }
}

SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

  switch (IntrinsicID) {
  case AMDGPUIntrinsic::SI_sendmsg: {
    Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
    SDValue Glue = Chain.getValue(1);
    return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain,
                       Op.getOperand(2), Glue);
  }
  case AMDGPUIntrinsic::SI_tbuffer_store: {
    SDValue Ops[] = {
      Chain,
      Op.getOperand(2),
      Op.getOperand(3),
      Op.getOperand(4),
      Op.getOperand(5),
      Op.getOperand(6),
      Op.getOperand(7),
      Op.getOperand(8),
      Op.getOperand(9),
      Op.getOperand(10),
      Op.getOperand(11),
      Op.getOperand(12),
      Op.getOperand(13),
      Op.getOperand(14)
    };

    EVT VT = Op.getOperand(3).getValueType();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo(),
        MachineMemOperand::MOStore,
        VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  default:
    return SDValue();
  }
}

SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);

  if (Op.getValueType().isVector()) {
    assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
           "Custom lowering for non-i32 vectors hasn't been implemented.");
    unsigned NumElements = Op.getValueType().getVectorNumElements();
    assert(NumElements != 2 && "v2 loads are supported for all address spaces.");

    switch (Load->getAddressSpace()) {
    default: break;
    case AMDGPUAS::CONSTANT_ADDRESS:
      if (isMemOpUniform(Load))
        break;
      // Non-uniform loads will be selected to MUBUF instructions, so they
      // have the same legalization requirements as global and private
      // loads.
      //
      // Fall-through
    case AMDGPUAS::GLOBAL_ADDRESS:
    case AMDGPUAS::PRIVATE_ADDRESS:
      if (NumElements >= 8)
        return SplitVectorLoad(Op, DAG);

      // v4 loads are supported for private and global memory.
      if (NumElements <= 4)
        break;
      // fall-through
    case AMDGPUAS::LOCAL_ADDRESS:
      // If properly aligned, splitting might let us use ds_read_b64.
      return SplitVectorLoad(Op, DAG);
    }
  }

  return AMDGPUTargetLowering::LowerLOAD(Op, DAG);
}

SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode,
                                               const SDValue &Op,
                                               SelectionDAG &DAG) const {
  return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1),
                     Op.getOperand(2),
                     Op.getOperand(3),
                     Op.getOperand(4));
}

SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() != MVT::i64)
    return SDValue();

  SDLoc DL(Op);
  SDValue Cond = Op.getOperand(0);

  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  SDValue One = DAG.getConstant(1, DL, MVT::i32);

  SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
  SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));

  SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
  SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);

  SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);

  SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
  SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);

  SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);

  SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, Lo, Hi);
  return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
}

// Catch division cases where we can use shortcuts with rcp and rsq
// instructions.
SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  EVT VT = Op.getValueType();
  bool Unsafe = DAG.getTarget().Options.UnsafeFPMath;

  if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
    if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals())) &&
        CLHS->isExactlyValue(1.0)) {
      // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
      // the CI documentation they have a worst case error of 1 ulp.
      // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
      // use them as long as we aren't trying to use denormals.

      // 1.0 / sqrt(x) -> rsq(x)
      //
      // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
      // error seems really high at 2^29 ULP.
      if (RHS.getOpcode() == ISD::FSQRT)
        return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));

      // 1.0 / x -> rcp(x)
      return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
    }
  }

  if (Unsafe) {
    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    SDNodeFlags Flags;
    Flags.setUnsafeAlgebra(true);
    SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
    return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, &Flags);
  }

  return SDValue();
}

SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
  SDValue FastLowered = LowerFastFDIV(Op, DAG);
  if (FastLowered.getNode())
    return FastLowered;

  // This uses v_rcp_f32, which does not handle denormals. Let this hit a
  // selection error for now rather than do something incorrect.
  if (Subtarget->hasFP32Denormals())
    return SDValue();

  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);

  const APFloat K0Val(BitsToFloat(0x6f800000)); // 2^96
  const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);

  const APFloat K1Val(BitsToFloat(0x2f800000)); // 2^-32
  const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);

  SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);

  // TODO: Should this propagate fast-math-flags?

  r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);

  SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);

  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);

  return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
}

SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getTarget().Options.UnsafeFPMath)
    return LowerFastFDIV(Op, DAG);

  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);

  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);

  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);

  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);

  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);

  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);

  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);

  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);

  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);

  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
                             NegDivScale0, Mul, DivScale1);

  SDValue Scale;

  if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    // Workaround a hardware bug on SI where the condition output from
    // div_scale is not usable.

    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);

    // Figure out which scale to use for div_fmas.
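    // Reconstruct the scale bit that DIV_FMAS expects by comparing the high
    // dwords of the inputs with the high dwords of the DIV_SCALE results,
    // since the VCC output of div_scale cannot be used here.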
1532 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 1533 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 1534 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 1535 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 1536 1537 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 1538 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 1539 1540 SDValue Scale0Hi 1541 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 1542 SDValue Scale1Hi 1543 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 1544 1545 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 1546 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 1547 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 1548 } else { 1549 Scale = DivScale1.getValue(1); 1550 } 1551 1552 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 1553 Fma4, Fma3, Mul, Scale); 1554 1555 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 1556} 1557 1558SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 1559 EVT VT = Op.getValueType(); 1560 1561 if (VT == MVT::f32) 1562 return LowerFDIV32(Op, DAG); 1563 1564 if (VT == MVT::f64) 1565 return LowerFDIV64(Op, DAG); 1566 1567 llvm_unreachable("Unexpected type for fdiv"); 1568} 1569 1570SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 1571 SDLoc DL(Op); 1572 StoreSDNode *Store = cast<StoreSDNode>(Op); 1573 EVT VT = Store->getMemoryVT(); 1574 1575 // These stores are legal. 1576 if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { 1577 if (VT.isVector() && VT.getVectorNumElements() > 4) 1578 return ScalarizeVectorStore(Op, DAG); 1579 return SDValue(); 1580 } 1581 1582 SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG); 1583 if (Ret.getNode()) 1584 return Ret; 1585 1586 if (VT.isVector() && VT.getVectorNumElements() >= 8) 1587 return SplitVectorStore(Op, DAG); 1588 1589 if (VT == MVT::i1) 1590 return DAG.getTruncStore(Store->getChain(), DL, 1591 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 1592 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 1593 1594 return SDValue(); 1595} 1596 1597SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 1598 SDLoc DL(Op); 1599 EVT VT = Op.getValueType(); 1600 SDValue Arg = Op.getOperand(0); 1601 // TODO: Should this propagate fast-math-flags? 
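  // The SIN_HW/COS_HW nodes expect their operand scaled so that one full
  // period maps to 1.0 (i.e. the argument is in revolutions rather than
  // radians), which is why the input is multiplied by 0.5/M_PI and reduced
  // to [0, 1) with FRACT first, roughly:
  //   sin(x) -> SIN_HW(fract(x * 1/(2*pi)))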
1602 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, 1603 DAG.getNode(ISD::FMUL, DL, VT, Arg, 1604 DAG.getConstantFP(0.5/M_PI, DL, 1605 VT))); 1606 1607 switch (Op.getOpcode()) { 1608 case ISD::FCOS: 1609 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); 1610 case ISD::FSIN: 1611 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); 1612 default: 1613 llvm_unreachable("Wrong trig opcode"); 1614 } 1615} 1616 1617//===----------------------------------------------------------------------===// 1618// Custom DAG optimizations 1619//===----------------------------------------------------------------------===// 1620 1621SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 1622 DAGCombinerInfo &DCI) const { 1623 EVT VT = N->getValueType(0); 1624 EVT ScalarVT = VT.getScalarType(); 1625 if (ScalarVT != MVT::f32) 1626 return SDValue(); 1627 1628 SelectionDAG &DAG = DCI.DAG; 1629 SDLoc DL(N); 1630 1631 SDValue Src = N->getOperand(0); 1632 EVT SrcVT = Src.getValueType(); 1633 1634 // TODO: We could try to match extracting the higher bytes, which would be 1635 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 1636 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 1637 // about in practice. 1638 if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { 1639 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 1640 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 1641 DCI.AddToWorklist(Cvt.getNode()); 1642 return Cvt; 1643 } 1644 } 1645 1646 // We are primarily trying to catch operations on illegal vector types 1647 // before they are expanded. 1648 // For scalars, we can use the more flexible method of checking masked bits 1649 // after legalization. 1650 if (!DCI.isBeforeLegalize() || 1651 !SrcVT.isVector() || 1652 SrcVT.getVectorElementType() != MVT::i8) { 1653 return SDValue(); 1654 } 1655 1656 assert(DCI.isBeforeLegalize() && "Unexpected legal type"); 1657 1658 // Weird sized vectors are a pain to handle, but we know 3 is really the same 1659 // size as 4. 1660 unsigned NElts = SrcVT.getVectorNumElements(); 1661 if (!SrcVT.isSimple() && NElts != 3) 1662 return SDValue(); 1663 1664 // Handle v4i8 -> v4f32 extload. Replace the v4i8 with a legal i32 load to 1665 // prevent a mess from expanding to v4i32 and repacking. 1666 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) { 1667 EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT); 1668 EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT); 1669 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts); 1670 LoadSDNode *Load = cast<LoadSDNode>(Src); 1671 1672 unsigned AS = Load->getAddressSpace(); 1673 unsigned Align = Load->getAlignment(); 1674 Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext()); 1675 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty); 1676 1677 // Don't try to replace the load if we have to expand it due to alignment 1678 // problems. Otherwise we will end up scalarizing the load, and trying to 1679 // repack into the vector for no real reason. 1680 if (Align < ABIAlignment && 1681 !allowsMisalignedMemoryAccesses(LoadVT, AS, Align, nullptr)) { 1682 return SDValue(); 1683 } 1684 1685 SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT, 1686 Load->getChain(), 1687 Load->getBasePtr(), 1688 LoadVT, 1689 Load->getMemOperand()); 1690 1691 // Make sure successors of the original load stay after it by updating 1692 // them to use the new Chain. 
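    // The integer value loaded here is expanded below into one
    // CVT_F32_UBYTEn node per byte, and the converted floats are repacked
    // into the final vector with a BUILD_VECTOR.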
    DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewLoad.getValue(1));

    SmallVector<SDValue, 4> Elts;
    if (RegVT.isVector())
      DAG.ExtractVectorElements(NewLoad, Elts);
    else
      Elts.push_back(NewLoad);

    SmallVector<SDValue, 4> Ops;

    unsigned EltIdx = 0;
    for (SDValue Elt : Elts) {
      unsigned ComponentsInElt = std::min(4u, NElts - 4 * EltIdx);
      for (unsigned I = 0; I < ComponentsInElt; ++I) {
        unsigned Opc = AMDGPUISD::CVT_F32_UBYTE0 + I;
        SDValue Cvt = DAG.getNode(Opc, DL, MVT::f32, Elt);
        DCI.AddToWorklist(Cvt.getNode());
        Ops.push_back(Cvt);
      }

      ++EltIdx;
    }

    assert(Ops.size() == NElts);

    return DAG.getNode(ISD::BUILD_VECTOR, DL, FloatVT, Ops);
  }

  return SDValue();
}

/// \brief Return true if the given offset Size in bytes can be folded into
/// the immediate offsets of a memory instruction for the given address space.
static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
                          const AMDGPUSubtarget &STI) {
  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS: {
    // MUBUF instructions have a 12-bit offset in bytes.
    return isUInt<12>(OffsetSize);
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    // SMRD instructions have an 8-bit offset in dwords on SI and
    // a 20-bit offset in bytes on VI.
    if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return isUInt<20>(OffsetSize);
    else
      return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
  }
  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS: {
    // The single offset versions have a 16-bit offset in bytes.
    return isUInt<16>(OffsetSize);
  }
  case AMDGPUAS::PRIVATE_ADDRESS:
    // Indirect register addressing does not use any offsets.
  default:
    return false;
  }
}

// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)

// This is a variant of
// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
//
// The normal DAG combiner will do this, but only if the add has one use since
// that would increase the number of instructions.
//
// This prevents us from seeing a constant offset that can be folded into a
// memory instruction's addressing mode. If we know the resulting add offset of
// a pointer can be folded into an addressing offset, we can replace the pointer
// operand with the add of the new constant offset. This eliminates one of the
// uses, and may allow the remaining use to also be simplified.
//
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
                                               unsigned AddrSpace,
                                               DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (N0.getOpcode() != ISD::ADD)
    return SDValue();

  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
  if (!CN1)
    return SDValue();

  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!CAdd)
    return SDValue();

  // If the resulting offset is too large, we can't fold it into the addressing
  // mode offset.
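  // For example, for a DS access (16-bit immediate offset field):
  //   (shl (add x, 16), 2) -> (add (shl x, 2), 64)
  // and the constant 64 can then be folded into the instruction's offset
  // field instead of occupying a separate add.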
1786 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 1787 if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *Subtarget)) 1788 return SDValue(); 1789 1790 SelectionDAG &DAG = DCI.DAG; 1791 SDLoc SL(N); 1792 EVT VT = N->getValueType(0); 1793 1794 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 1795 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 1796 1797 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset); 1798} 1799 1800SDValue SITargetLowering::performAndCombine(SDNode *N, 1801 DAGCombinerInfo &DCI) const { 1802 if (DCI.isBeforeLegalize()) 1803 return SDValue(); 1804 1805 SelectionDAG &DAG = DCI.DAG; 1806 1807 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> 1808 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) 1809 SDValue LHS = N->getOperand(0); 1810 SDValue RHS = N->getOperand(1); 1811 1812 if (LHS.getOpcode() == ISD::SETCC && 1813 RHS.getOpcode() == ISD::SETCC) { 1814 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 1815 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); 1816 1817 SDValue X = LHS.getOperand(0); 1818 SDValue Y = RHS.getOperand(0); 1819 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) 1820 return SDValue(); 1821 1822 if (LCC == ISD::SETO) { 1823 if (X != LHS.getOperand(1)) 1824 return SDValue(); 1825 1826 if (RCC == ISD::SETUNE) { 1827 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); 1828 if (!C1 || !C1->isInfinity() || C1->isNegative()) 1829 return SDValue(); 1830 1831 const uint32_t Mask = SIInstrFlags::N_NORMAL | 1832 SIInstrFlags::N_SUBNORMAL | 1833 SIInstrFlags::N_ZERO | 1834 SIInstrFlags::P_ZERO | 1835 SIInstrFlags::P_SUBNORMAL | 1836 SIInstrFlags::P_NORMAL; 1837 1838 static_assert(((~(SIInstrFlags::S_NAN | 1839 SIInstrFlags::Q_NAN | 1840 SIInstrFlags::N_INFINITY | 1841 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, 1842 "mask not equal"); 1843 1844 SDLoc DL(N); 1845 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, 1846 X, DAG.getConstant(Mask, DL, MVT::i32)); 1847 } 1848 } 1849 } 1850 1851 return SDValue(); 1852} 1853 1854SDValue SITargetLowering::performOrCombine(SDNode *N, 1855 DAGCombinerInfo &DCI) const { 1856 SelectionDAG &DAG = DCI.DAG; 1857 SDValue LHS = N->getOperand(0); 1858 SDValue RHS = N->getOperand(1); 1859 1860 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) 1861 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && 1862 RHS.getOpcode() == AMDGPUISD::FP_CLASS) { 1863 SDValue Src = LHS.getOperand(0); 1864 if (Src != RHS.getOperand(0)) 1865 return SDValue(); 1866 1867 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); 1868 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); 1869 if (!CLHS || !CRHS) 1870 return SDValue(); 1871 1872 // Only 10 bits are used. 
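    // The ten bits correspond to the SIInstrFlags test masks: s_nan, q_nan,
    // +/-infinity, +/-normal, +/-subnormal and +/-zero, hence MaxMask below
    // is 0x3ff.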
    static const uint32_t MaxMask = 0x3ff;

    uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
    SDLoc DL(N);
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                       Src, DAG.getConstant(NewMask, DL, MVT::i32));
  }

  return SDValue();
}

SDValue SITargetLowering::performClassCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Mask = N->getOperand(1);

  // fp_class x, 0 -> false
  if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
    if (CMask->isNullValue())
      return DAG.getConstant(0, SDLoc(N), MVT::i1);
  }

  return SDValue();
}

static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return AMDGPUISD::FMAX3;
  case ISD::SMAX:
    return AMDGPUISD::SMAX3;
  case ISD::UMAX:
    return AMDGPUISD::UMAX3;
  case ISD::FMINNUM:
    return AMDGPUISD::FMIN3;
  case ISD::SMIN:
    return AMDGPUISD::SMIN3;
  case ISD::UMIN:
    return AMDGPUISD::UMIN3;
  default:
    llvm_unreachable("Not a min/max opcode");
  }
}

SDValue SITargetLowering::performMin3Max3Combine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  unsigned Opc = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.

  // max(max(a, b), c)
  if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
    SDLoc DL(N);
    return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                       DL,
                       N->getValueType(0),
                       Op0.getOperand(0),
                       Op0.getOperand(1),
                       Op1);
  }

  // max(a, max(b, c))
  if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
    SDLoc DL(N);
    return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                       DL,
                       N->getValueType(0),
                       Op0,
                       Op1.getOperand(0),
                       Op1.getOperand(1));
  }

  return SDValue();
}

SDValue SITargetLowering::performSetCCCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = LHS.getValueType();

  if (VT != MVT::f32 && VT != MVT::f64)
    return SDValue();

  // Match isinf pattern
  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
    const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
    if (!CRHS)
      return SDValue();

    const APFloat &APF = CRHS->getValueAPF();
    if (APF.isInfinity() && !APF.isNegative()) {
      unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
      return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(Mask, SL, MVT::i32));
    }
  }

  return SDValue();
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM: // TODO: What about fmax_legacy?
  case ISD::FMINNUM:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN: {
    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
        N->getValueType(0) != MVT::f64 &&
        getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return performMin3Max3Combine(N, DCI);
    break;
  }

  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3: {
    unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;

    SDValue Src = N->getOperand(0);
    APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(Src, Demanded) ||
        TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
      DCI.CommitTargetLoweringOpt(TLO);
    }

    break;
  }

  case ISD::UINT_TO_FP:
    return performUCharToFloatCombine(N, DCI);

  case ISD::FADD: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    EVT VT = N->getValueType(0);
    if (VT != MVT::f32)
      break;

    // Only do this if we are not trying to support denormals. v_mad_f32 does
    // not support denormals ever.
    if (Subtarget->hasFP32Denormals())
      break;

    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    // These should really be instruction patterns, but writing patterns with
    // source modifiers is a pain.

    // fadd (fadd (a, a), b) -> mad 2.0, a, b
    if (LHS.getOpcode() == ISD::FADD) {
      SDValue A = LHS.getOperand(0);
      if (A == LHS.getOperand(1)) {
        const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
        return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS);
      }
    }

    // fadd (b, fadd (a, a)) -> mad 2.0, a, b
    if (RHS.getOpcode() == ISD::FADD) {
      SDValue A = RHS.getOperand(0);
      if (A == RHS.getOperand(1)) {
        const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
        return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS);
      }
    }

    return SDValue();
  }
  case ISD::FSUB: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    EVT VT = N->getValueType(0);

    // Try to get the fneg to fold into the source modifier. This undoes generic
    // DAG combines and folds them into the mad.
    //
    // Only do this if we are not trying to support denormals. v_mad_f32 does
    // not support denormals ever.
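    // The fneg re-introduced below is effectively free: it can be absorbed
    // into the mad's source modifiers during selection, so the whole fsub
    // still becomes a single v_mad_f32.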
    if (VT == MVT::f32 &&
        !Subtarget->hasFP32Denormals()) {
      SDValue LHS = N->getOperand(0);
      SDValue RHS = N->getOperand(1);
      if (LHS.getOpcode() == ISD::FADD) {
        // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)

        SDValue A = LHS.getOperand(0);
        if (A == LHS.getOperand(1)) {
          const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
          SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS);

          return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS);
        }
      }

      if (RHS.getOpcode() == ISD::FADD) {
        // (fsub c, (fadd a, a)) -> mad -2.0, a, c

        SDValue A = RHS.getOperand(0);
        if (A == RHS.getOperand(1)) {
          const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32);
          return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS);
        }
      }

      return SDValue();
    }

    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX: { // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;

    MemSDNode *MemNode = cast<MemSDNode>(N);
    SDValue Ptr = MemNode->getBasePtr();

    // TODO: We could also do this for multiplies.
    unsigned AS = MemNode->getAddressSpace();
    if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) {
      SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
      if (NewPtr) {
        SmallVector<SDValue, 8> NewOps(MemNode->op_begin(), MemNode->op_end());

        NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
        return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0);
      }
    }
    break;
  }
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  }
  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

/// \brief Analyze the possible immediate value Op
///
/// Returns -1 if it isn't an immediate, 0 if it's an inline immediate
/// and the immediate value if it's a literal immediate
int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const {

  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) {
    if (TII->isInlineConstant(Node->getAPIntValue()))
      return 0;

    uint64_t Val = Node->getZExtValue();
    return isUInt<32>(Val) ?
Val : -1; 2174 } 2175 2176 if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) { 2177 if (TII->isInlineConstant(Node->getValueAPF().bitcastToAPInt())) 2178 return 0; 2179 2180 if (Node->getValueType(0) == MVT::f32) 2181 return FloatToBits(Node->getValueAPF().convertToFloat()); 2182 2183 return -1; 2184 } 2185 2186 return -1; 2187} 2188 2189/// \brief Helper function for adjustWritemask 2190static unsigned SubIdx2Lane(unsigned Idx) { 2191 switch (Idx) { 2192 default: return 0; 2193 case AMDGPU::sub0: return 0; 2194 case AMDGPU::sub1: return 1; 2195 case AMDGPU::sub2: return 2; 2196 case AMDGPU::sub3: return 3; 2197 } 2198} 2199 2200/// \brief Adjust the writemask of MIMG instructions 2201void SITargetLowering::adjustWritemask(MachineSDNode *&Node, 2202 SelectionDAG &DAG) const { 2203 SDNode *Users[4] = { }; 2204 unsigned Lane = 0; 2205 unsigned OldDmask = Node->getConstantOperandVal(0); 2206 unsigned NewDmask = 0; 2207 2208 // Try to figure out the used register components 2209 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); 2210 I != E; ++I) { 2211 2212 // Abort if we can't understand the usage 2213 if (!I->isMachineOpcode() || 2214 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) 2215 return; 2216 2217 // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used. 2218 // Note that subregs are packed, i.e. Lane==0 is the first bit set 2219 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit 2220 // set, etc. 2221 Lane = SubIdx2Lane(I->getConstantOperandVal(1)); 2222 2223 // Set which texture component corresponds to the lane. 2224 unsigned Comp; 2225 for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) { 2226 assert(Dmask); 2227 Comp = countTrailingZeros(Dmask); 2228 Dmask &= ~(1 << Comp); 2229 } 2230 2231 // Abort if we have more than one user per component 2232 if (Users[Lane]) 2233 return; 2234 2235 Users[Lane] = *I; 2236 NewDmask |= 1 << Comp; 2237 } 2238 2239 // Abort if there's no change 2240 if (NewDmask == OldDmask) 2241 return; 2242 2243 // Adjust the writemask in the node 2244 std::vector<SDValue> Ops; 2245 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); 2246 Ops.insert(Ops.end(), Node->op_begin() + 1, Node->op_end()); 2247 Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops); 2248 2249 // If we only got one lane, replace it with a copy 2250 // (if NewDmask has only one bit set...) 
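  // NewDmask & (NewDmask - 1) clears the lowest set bit, so this tests for
  // "exactly one bit set", i.e. only a single component is still used.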
  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
    SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
                                       MVT::i32);
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                      SDLoc(), Users[Lane]->getValueType(0),
                                      SDValue(Node, 0), RC);
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {

    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }
}

static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  DAG.UpdateNodeOperands(Node, Ops);
}

/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  if (TII->isMIMG(Node->getMachineOpcode()))
    adjustWritemask(Node, DAG);

  if (Node->getMachineOpcode() == AMDGPU::INSERT_SUBREG ||
      Node->getMachineOpcode() == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }
  return Node;
}

/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI->getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  if (TII->isMIMG(*MI)) {
    unsigned VReg = MI->getOperand(0).getReg();
    unsigned Writemask = MI->getOperand(1).getImm();
    unsigned BitsSet = 0;
    for (unsigned i = 0; i < 4; ++i)
      BitsSet += Writemask & (1 << i) ?
1 : 0; 2347 2348 const TargetRegisterClass *RC; 2349 switch (BitsSet) { 2350 default: return; 2351 case 1: RC = &AMDGPU::VGPR_32RegClass; break; 2352 case 2: RC = &AMDGPU::VReg_64RegClass; break; 2353 case 3: RC = &AMDGPU::VReg_96RegClass; break; 2354 } 2355 2356 unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet); 2357 MI->setDesc(TII->get(NewOpcode)); 2358 MRI.setRegClass(VReg, RC); 2359 return; 2360 } 2361 2362 // Replace unused atomics with the no return version. 2363 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI->getOpcode()); 2364 if (NoRetAtomicOp != -1) { 2365 if (!Node->hasAnyUseOfValue(0)) { 2366 MI->setDesc(TII->get(NoRetAtomicOp)); 2367 MI->RemoveOperand(0); 2368 } 2369 2370 return; 2371 } 2372} 2373 2374static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) { 2375 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); 2376 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); 2377} 2378 2379MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, 2380 SDLoc DL, 2381 SDValue Ptr) const { 2382 const SIInstrInfo *TII = 2383 static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); 2384 2385 // Build the half of the subregister with the constants before building the 2386 // full 128-bit register. If we are building multiple resource descriptors, 2387 // this will allow CSEing of the 2-component register. 2388 const SDValue Ops0[] = { 2389 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), 2390 buildSMovImm32(DAG, DL, 0), 2391 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 2392 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), 2393 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) 2394 }; 2395 2396 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, 2397 MVT::v2i32, Ops0), 0); 2398 2399 // Combine the constants and the pointer. 2400 const SDValue Ops1[] = { 2401 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 2402 Ptr, 2403 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), 2404 SubRegHi, 2405 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) 2406 }; 2407 2408 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); 2409} 2410 2411/// \brief Return a resource descriptor with the 'Add TID' bit enabled 2412/// The TID (Thread ID) is multiplied by the stride value (bits [61:48] 2413/// of the resource descriptor) to create an offset, which is added to 2414/// the resource pointer. 
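///
/// Roughly, the four dwords assembled below are:
///   word0 = pointer[31:0]
///   word1 = pointer[63:32] | RsrcDword1   (this is where the stride lives)
///   word2 = RsrcDword2And3[31:0]
///   word3 = RsrcDword2And3[63:32]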
2415MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, 2416 SDLoc DL, 2417 SDValue Ptr, 2418 uint32_t RsrcDword1, 2419 uint64_t RsrcDword2And3) const { 2420 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); 2421 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); 2422 if (RsrcDword1) { 2423 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, 2424 DAG.getConstant(RsrcDword1, DL, MVT::i32)), 2425 0); 2426 } 2427 2428 SDValue DataLo = buildSMovImm32(DAG, DL, 2429 RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); 2430 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); 2431 2432 const SDValue Ops[] = { 2433 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), 2434 PtrLo, 2435 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), 2436 PtrHi, 2437 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), 2438 DataLo, 2439 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), 2440 DataHi, 2441 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) 2442 }; 2443 2444 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); 2445} 2446 2447SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG, 2448 const TargetRegisterClass *RC, 2449 unsigned Reg, EVT VT) const { 2450 SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT); 2451 2452 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()), 2453 cast<RegisterSDNode>(VReg)->getReg(), VT); 2454} 2455 2456//===----------------------------------------------------------------------===// 2457// SI Inline Assembly Support 2458//===----------------------------------------------------------------------===// 2459 2460std::pair<unsigned, const TargetRegisterClass *> 2461SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 2462 StringRef Constraint, 2463 MVT VT) const { 2464 2465 if (Constraint.size() == 1) { 2466 switch (Constraint[0]) { 2467 case 's': 2468 case 'r': 2469 switch (VT.getSizeInBits()) { 2470 default: 2471 return std::make_pair(0U, nullptr); 2472 case 32: 2473 return std::make_pair(0U, &AMDGPU::SGPR_32RegClass); 2474 case 64: 2475 return std::make_pair(0U, &AMDGPU::SGPR_64RegClass); 2476 case 128: 2477 return std::make_pair(0U, &AMDGPU::SReg_128RegClass); 2478 case 256: 2479 return std::make_pair(0U, &AMDGPU::SReg_256RegClass); 2480 } 2481 2482 case 'v': 2483 switch (VT.getSizeInBits()) { 2484 default: 2485 return std::make_pair(0U, nullptr); 2486 case 32: 2487 return std::make_pair(0U, &AMDGPU::VGPR_32RegClass); 2488 case 64: 2489 return std::make_pair(0U, &AMDGPU::VReg_64RegClass); 2490 case 96: 2491 return std::make_pair(0U, &AMDGPU::VReg_96RegClass); 2492 case 128: 2493 return std::make_pair(0U, &AMDGPU::VReg_128RegClass); 2494 case 256: 2495 return std::make_pair(0U, &AMDGPU::VReg_256RegClass); 2496 case 512: 2497 return std::make_pair(0U, &AMDGPU::VReg_512RegClass); 2498 } 2499 } 2500 } 2501 2502 if (Constraint.size() > 1) { 2503 const TargetRegisterClass *RC = nullptr; 2504 if (Constraint[1] == 'v') { 2505 RC = &AMDGPU::VGPR_32RegClass; 2506 } else if (Constraint[1] == 's') { 2507 RC = &AMDGPU::SGPR_32RegClass; 2508 } 2509 2510 if (RC) { 2511 uint32_t Idx; 2512 bool Failed = Constraint.substr(2).getAsInteger(10, Idx); 2513 if (!Failed && Idx < RC->getNumRegs()) 2514 return std::make_pair(RC->getRegister(Idx), RC); 2515 } 2516 } 2517 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 2518} 2519 2520SITargetLowering::ConstraintType 
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
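// For reference, these constraints are what make inline asm such as
//   asm volatile("v_mov_b32 %0, %1" : "=v"(Dst) : "s"(Src));
// work: 'v' selects a VGPR register class and 's' an SGPR class of the
// appropriate width.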