SILowerControlFlow.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack.  Each Scalar ALU controls the operations of 64 Vector
/// ALUs.  The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU).  Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC register to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC   // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0             // This instruction is an optional
///                                    // optimization which allows us to
///                                    // branch if all the bits of
///                                    // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0  // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0  // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC    // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1             // Use our branch optimization
///                                    // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0  // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0     // Re-enable saved exec mask bits
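///
/// Loops are lowered along the same lines.  The following sketch is assembled
/// from the Break(), Loop() and EndCf() helpers below; the register names and
/// labels are chosen for the example only:
///
/// %SGPR0 = SI_BREAK %SGPR0            // In the loop body
/// SI_LOOP %SGPR0, label_header
///
/// becomes:
///
/// %SGPR0 = S_OR_B64 %EXEC, %SGPR0     // Accumulate lanes that break
/// %EXEC = S_ANDN2_B64 %EXEC, %SGPR0   // Disable lanes that broke out
/// S_CBRANCH_EXECNZ label_header       // Iterate while any lane is active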
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

namespace {

class SILowerControlFlowPass : public MachineFunctionPass {

private:
  static const unsigned SkipThreshold = 12;

  static char ID;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  SILowerControlFlowPass(TargetMachine &tm) :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow instructions";
  }

};

} // End anonymous namespace

char SILowerControlFlowPass::ID = 0;

FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
  return new SILowerControlFlowPass(tm);
}

bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
                                        MachineBasicBlock *To) {

  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {

      if (I->isBundle() || !I->isBundled())
        if (++NumInstr >= SkipThreshold)
          return true;
    }
  }

  return false;
}

void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To)
    .addReg(AMDGPU::EXEC);
}
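// A pixel shader that executes a KILL may end up with every lane disabled.
// When the exec mask is all zeroes there is no work left for this wavefront,
// so SkipIfDead() below exports a null value and ends the program early
// rather than executing the remainder of the shader with no active lanes.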
void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType !=
      ShaderType::PIXEL ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3)
    .addReg(AMDGPU::EXEC);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

void SILowerControlFlowPass::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1))
    .addReg(AMDGPU::EXEC);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

void SILowerControlFlowPass::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

  // Kill is only allowed in pixel / geometry shaders
  assert(MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType ==
         ShaderType::PIXEL ||
         MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType ==
         ShaderType::GEOMETRY);

  // Clear this thread from the exec mask if the operand is negative
  if ((Op.isImm() || Op.isFPImm())) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.isImm() ? (Op.getImm() & 0x80000000) :
        Op.getFPImm()->isNegative()) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}
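// LoadM0 materializes the index operand of an indirect access into M0, the
// register the V_MOVREL* instructions read.  A uniform (SGPR) index needs
// only a single S_MOV_B32.  A divergent (VGPR) index requires a "waterfall"
// loop: read one lane's index with V_READFIRSTLANE_B32, enable exactly the
// lanes whose index matches, run MovRel for them, remove them from EXEC, and
// branch back until every lane has been handled.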
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(Idx);
    MBB.insert(I, MovRel);
    MI.eraseFromParent();
    return;
  }

  assert(AMDGPU::SReg_64RegClass.contains(Save));
  assert(AMDGPU::VReg_32RegClass.contains(Idx));

  // Save the EXEC mask
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
    .addReg(AMDGPU::EXEC);

  // Read the next variant into VCC (lower 32 bits) <- also loop target
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
          AMDGPU::VCC_LO)
    .addReg(Idx);

  // Move index from VCC into M0
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
    .addReg(AMDGPU::VCC_LO);

  // Compare the just read M0 value to all possible Idx values
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
    .addReg(AMDGPU::M0)
    .addReg(Idx);

  // Update EXEC, save the original EXEC value to VCC
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
    .addReg(AMDGPU::VCC);

  // Do the actual move
  MBB.insert(I, MovRel);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::VCC);

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(-7)
    .addReg(AMDGPU::EXEC);

  // Restore EXEC
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(Save);

  MI.eraseFromParent();
}
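// IndirectSrc/IndirectDst lower the SI_INDIRECT_* pseudos to V_MOVRELS/
// V_MOVRELD.  The constant part of the index (Off) is folded straight into
// the register operand: the "SubReg + Off" arithmetic below relies on the
// VGPR register enums being numbered consecutively, so adding Off to a
// register number selects the sub-register at that static offset.  The
// dynamic part of the index is routed through M0 by LoadM0().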
void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  unsigned Off = MI.getOperand(4).getImm();
  unsigned SubReg = TRI->getSubReg(Vec, AMDGPU::sub0);
  if (!SubReg)
    SubReg = Vec;

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
            .addReg(SubReg + Off)
            .addReg(AMDGPU::M0, RegState::Implicit)
            .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel);
}

void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned SubReg = TRI->getSubReg(Dst, AMDGPU::sub0);
  if (!SubReg)
    SubReg = Dst;

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
            .addReg(SubReg + Off, RegState::Define)
            .addReg(Val)
            .addReg(AMDGPU::M0, RegState::Implicit)
            .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel);
}

bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo*>(MF.getTarget().getInstrInfo());
  TRI = static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedM0 = false;
  bool NeedWQM = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;
      if (TII->isDS(MI.getOpcode())) {
        NeedM0 = true;
        NeedWQM = true;
      }

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;

        case AMDGPU::V_INTERP_P1_F32:
        case AMDGPU::V_INTERP_P2_F32:
        case AMDGPU::V_INTERP_MOV_F32:
          NeedWQM = true;
          break;
      }
    }
  }

  if (NeedM0) {
    MachineBasicBlock &MBB = MF.front();
    // Initialize M0 to a value that won't cause LDS access to be discarded
    // due to offset clamping
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_MOV_B32),
            AMDGPU::M0).addImm(0xffffffff);
  }

  if (NeedWQM && MFI->ShaderType == ShaderType::PIXEL) {
    MachineBasicBlock &MBB = MF.front();
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  return true;
}
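// For context, a rough sketch of how a target pass pipeline picks this pass
// up; the actual hook lives in AMDGPUTargetMachine.cpp and this illustration
// may not match it line for line:
//
//   bool AMDGPUPassConfig::addPreEmitPass() {
//     ...
//     addPass(createSILowerControlFlowPass(*TM));
//     ...
//   }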