R600MachineScheduler.cpp revision 5f035d048e4ae04a30075e75d919fe023452ab0b
//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
// TODO: Scheduling is optimized for the VLIW4 architecture; modify it to
// support the TRANS slot.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "R600MachineScheduler.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {

  DAG = dag;
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
  // All four slots (X, Y, Z, W) start out marked occupied so that the first
  // ALU pick opens a fresh instruction group.
  OccupedSlotsMask = 15;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
}

void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst)
{
  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
  QSrc.clear();
}

SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = 0;
  NextInstKind = IDOther;

  IsTopNode = false;

  // Check whether we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch].empty() || !Available[IDOther].empty());

  // We want to schedule AR defs as soon as possible to make sure they aren't
  // put in a different ALU clause from their uses.
  if (!SU && !UnscheduledARDefs.empty()) {
    SU = UnscheduledARDefs[0];
    UnscheduledARDefs.erase(UnscheduledARDefs.begin());
    NextInstKind = IDAlu;
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // Try to pick an ALU instruction.
    SU = pickAlu();
    if (!SU && !PhysicalRegCopy.empty()) {
      SU = PhysicalRegCopy.front();
      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
    }
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // Try to pick a FETCH instruction.
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // Try to pick any other instruction.
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  // We want to schedule the AR uses as late as possible to make sure that
  // the AR defs have been released.
  if (!SU && !UnscheduledARUses.empty()) {
    SU = UnscheduledARUses[0];
    UnscheduledARUses.erase(UnscheduledARUses.begin());
    NextInstKind = IDAlu;
  }

  DEBUG(
      if (SU) {
        dbgs() << " ** Pick node **\n";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE \n";
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}

void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask = 15;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      // Literal operands occupy their own slot in the clause; count them too.
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
          E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  }
}

static bool
isPhysicalRegCopy(MachineInstr *MI) {
  if (MI->getOpcode() != AMDGPU::COPY)
    return false;

  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
}

void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  DEBUG(dbgs() << "Top Releasing "; SU->dump(DAG););
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  DEBUG(dbgs() << "Bottom Releasing "; SU->dump(DAG););
  if (isPhysicalRegCopy(SU->getInstr())) {
    PhysicalRegCopy.push_back(SU);
    return;
  }

  int IK = getInstKind(SU);

  // Check for AR register defines.
  for (MachineInstr::const_mop_iterator I = SU->getInstr()->operands_begin(),
                                        E = SU->getInstr()->operands_end();
                                        I != E; ++I) {
    if (I->isReg() && I->getReg() == AMDGPU::AR_X) {
      if (I->isDef()) {
        UnscheduledARDefs.push_back(SU);
      } else {
        UnscheduledARUses.push_back(SU);
      }
      return;
    }
  }

  // There is no export clause; we can schedule one as soon as it's ready.
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);
}

bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}

R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
    return AluPredX;
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL; don't consider it in scheduling.
      return AluDiscarded;
    }
  default:
    break;
  }

  // Does the instruction take a whole instruction group (IG)?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()))
    return AluT_XYZW;

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0:
    return AluT_X;
  case AMDGPU::sub1:
    return AluT_Y;
  case AMDGPU::sub2:
    return AluT_Z;
  case AMDGPU::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  return AluAny;
}

int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::PRED_X:
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return IDAlu;
  default:
    return IDOther;
  }
}

SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q) {
  if (Q.empty())
    return NULL;
  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
      It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->canBundle(InstructionsGroupCandidate)) {
      InstructionsGroupCandidate.pop_back();
      // Convert the reverse iterator into the forward iterator that points
      // at *It before erasing.
      Q.erase((It + 1).base());
      return SU;
    } else {
      InstructionsGroupCandidate.pop_back();
    }
  }
  return NULL;
}

void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
    AluKind AK = getAluKind(QSrc[i]);
    AvailableAlus[AK].push_back(QSrc[i]);
  }
  QSrc.clear();
}

void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert(OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}

void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
  unsigned DestReg = MI->getOperand(0).getReg();
  // Register pressure tracking crashes if an operand is both defined and used
  // in the same instruction and we try to constrain its register class.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
      E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == MI->getOperand(0).getReg())
      return;
  }
  // Constrain the register class of DestReg so that it is assigned to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}

SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
  if (SlotedSU)
    return SlotedSU;
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
  if (UnslotedSU)
    AssignSlot(UnslotedSU->getInstr(), Slot);
  return UnslotedSU;
}

bool R600SchedStrategy::isAvailablesAluEmpty() const {
  return Pending[IDAlu].empty() && AvailableAlus[AluAny].empty() &&
      AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() &&
      AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() &&
      AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty() &&
      AvailableAlus[AluPredX].empty();
}

SUnit* R600SchedStrategy::pickAlu() {
  while (!isAvailablesAluEmpty()) {
    if (!OccupedSlotsMask) {
      // Bottom-up scheduling: PRED_X must come first.
      if (!AvailableAlus[AluPredX].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluPredX]);
      }
      // Flush physical register copies (RA will discard them).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluDiscarded]);
      }
      // If there is a T_XYZW ALU instruction available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluT_XYZW]);
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return NULL;
}

SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = 0;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}
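
//===----------------------------------------------------------------------===//
// Usage sketch (illustration only; not part of this revision). A strategy
// like R600SchedStrategy is plugged into the generic MachineScheduler pass by
// wrapping it in a ScheduleDAGMI and registering a factory through
// MachineSchedRegistry (both available via MachineScheduler.h, which
// R600MachineScheduler.h includes). The names below mirror the hook-up found
// in AMDGPUTargetMachine.cpp of the same era and should be treated as an
// assumption, not as code belonging to this file:
//
//   static ScheduleDAGInstrs *createR600MachineScheduler(
//       MachineSchedContext *C) {
//     // ScheduleDAGMI takes ownership of the strategy object.
//     return new ScheduleDAGMI(C, new R600SchedStrategy());
//   }
//
//   static MachineSchedRegistry
//   SchedCustomRegistry("r600", "Run R600's custom scheduler",
//                       createR600MachineScheduler);
//
// With the registry entry in place, the scheduler can be selected on the
// command line with -misched=r600, or installed as the target's default
// scheduler by the target machine's pass configuration.
//===----------------------------------------------------------------------===//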