1//===-- SIMachineFunctionInfo.cpp - SI Machine Function Info -------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8/// \file
9//===----------------------------------------------------------------------===//
10
11
12#include "SIMachineFunctionInfo.h"
13#include "SIInstrInfo.h"
14#include "SIRegisterInfo.h"
15#include "llvm/CodeGen/MachineRegisterInfo.h"
16#include "llvm/IR/Function.h"
17#include "llvm/IR/LLVMContext.h"
18
19#define MAX_LANES 64
20
21using namespace llvm;
22
23
// Pin the vtable to this file.  Defining this (otherwise unused) virtual
// method out-of-line gives the class's vtable a single home translation
// unit instead of being emitted in every TU that uses the class.
void SIMachineFunctionInfo::anchor() {}
26
27SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
28  : AMDGPUMachineFunction(MF),
29    PSInputAddr(0),
30    SpillTracker() { }
31
32static unsigned createLaneVGPR(MachineRegisterInfo &MRI, MachineFunction *MF) {
33  unsigned VGPR = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
34
35  // We need to add this register as live out for the function, in order to
36  // have the live range calculated directly.
37  //
38  // When register spilling begins, we have already calculated the live
39  // live intervals for all the registers.  Since we are spilling SGPRs to
40  // VGPRs, we need to update the Lane VGPR's live interval every time we
41  // spill or restore a register.
42  //
43  // Unfortunately, there is no good way to update the live interval as
44  // the TargetInstrInfo callbacks for spilling and restoring don't give
45  // us access to the live interval information.
46  //
47  // We are lucky, though, because the InlineSpiller calls
48  // LiveRangeEdit::calculateRegClassAndHint() which iterates through
49  // all the new register that have been created when restoring a register
50  // and calls LiveIntervals::getInterval(), which creates and computes
51  // the live interval for the newly created register.  However, once this
52  // live intervals is created, it doesn't change and since we usually reuse
53  // the Lane VGPR multiple times, this means any uses after the first aren't
54  // added to the live interval.
55  //
56  // To work around this, we add Lane VGPRs to the functions live out list,
57  // so that we can guarantee its live range will cover all of its uses.
58
59  for (MachineBasicBlock &MBB : *MF) {
60    if (MBB.back().getOpcode() == AMDGPU::S_ENDPGM) {
61      MBB.back().addOperand(*MF, MachineOperand::CreateReg(VGPR, false, true));
62      return VGPR;
63    }
64  }
65
66  LLVMContext &Ctx = MF->getFunction()->getContext();
67  Ctx.emitError("Could not find S_ENDPGM instruction.");
68
69  return VGPR;
70}
71
72unsigned SIMachineFunctionInfo::RegSpillTracker::reserveLanes(
73    MachineRegisterInfo &MRI, MachineFunction *MF, unsigned NumRegs) {
74  unsigned StartLane = CurrentLane;
75  CurrentLane += NumRegs;
76  if (!LaneVGPR) {
77    LaneVGPR = createLaneVGPR(MRI, MF);
78  } else {
79    if (CurrentLane >= MAX_LANES) {
80      StartLane = CurrentLane = 0;
81      LaneVGPR = createLaneVGPR(MRI, MF);
82    }
83  }
84  return StartLane;
85}
86
87void SIMachineFunctionInfo::RegSpillTracker::addSpilledReg(unsigned FrameIndex,
88                                                           unsigned Reg,
89                                                           int Lane) {
90  SpilledRegisters[FrameIndex] = SpilledReg(Reg, Lane);
91}
92
/// Look up the spill location recorded for frame slot \p FrameIndex.
/// NOTE: this goes through operator[], so querying a slot that was never
/// registered with addSpilledReg() default-constructs (and caches) an empty
/// SpilledReg entry rather than failing.
const SIMachineFunctionInfo::SpilledReg&
SIMachineFunctionInfo::RegSpillTracker::getSpilledReg(unsigned FrameIndex) {
  return SpilledRegisters[FrameIndex];
}
97