//===-- InterferenceCache.cpp - Caching per-block interference ---------*--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InterferenceCache remembers per-block interference in LiveIntervalUnions.
//
// (Snapshot at revision 6ef7da0197735a16aa534e9e2c80709d3d6e8c56.)
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "InterferenceCache.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

// Static member used for null interference cursors.
// Default-constructed, so its SlotIndex fields are invalid; presumably a
// cursor with no entry points here and therefore reports no interference —
// confirm against Cursor's definition in the header.
InterferenceCache::BlockInterference InterferenceCache::Cursor::NoInterference;

25void InterferenceCache::init(MachineFunction *mf,
26                             LiveIntervalUnion *liuarray,
27                             SlotIndexes *indexes,
28                             LiveIntervals *lis,
29                             const TargetRegisterInfo *tri) {
30  MF = mf;
31  LIUArray = liuarray;
32  TRI = tri;
33  PhysRegEntries.assign(TRI->getNumRegs(), 0);
34  for (unsigned i = 0; i != CacheEntries; ++i)
35    Entries[i].clear(mf, indexes, lis);
36}
37
38InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
39  unsigned E = PhysRegEntries[PhysReg];
40  if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
41    if (!Entries[E].valid(LIUArray, TRI))
42      Entries[E].revalidate();
43    return &Entries[E];
44  }
45  // No valid entry exists, pick the next round-robin entry.
46  E = RoundRobin;
47  if (++RoundRobin == CacheEntries)
48    RoundRobin = 0;
49  for (unsigned i = 0; i != CacheEntries; ++i) {
50    // Skip entries that are in use.
51    if (Entries[E].hasRefs()) {
52      if (++E == CacheEntries)
53        E = 0;
54      continue;
55    }
56    Entries[E].reset(PhysReg, LIUArray, TRI, MF);
57    PhysRegEntries[PhysReg] = E;
58    return &Entries[E];
59  }
60  llvm_unreachable("Ran out of interference cache entries.");
61}
62
63/// revalidate - LIU contents have changed, update tags.
64void InterferenceCache::Entry::revalidate() {
65  // Invalidate all block entries.
66  ++Tag;
67  // Invalidate all iterators.
68  PrevPos = SlotIndex();
69  for (unsigned i = 0, e = Aliases.size(); i != e; ++i)
70    Aliases[i].second = Aliases[i].first->getTag();
71}
72
73void InterferenceCache::Entry::reset(unsigned physReg,
74                                     LiveIntervalUnion *LIUArray,
75                                     const TargetRegisterInfo *TRI,
76                                     const MachineFunction *MF) {
77  assert(!hasRefs() && "Cannot reset cache entry with references");
78  // LIU's changed, invalidate cache.
79  ++Tag;
80  PhysReg = physReg;
81  Blocks.resize(MF->getNumBlockIDs());
82  Aliases.clear();
83  for (const unsigned *AS = TRI->getOverlaps(PhysReg); *AS; ++AS) {
84    LiveIntervalUnion *LIU = LIUArray + *AS;
85    Aliases.push_back(std::make_pair(LIU, LIU->getTag()));
86  }
87
88  // Reset iterators.
89  PrevPos = SlotIndex();
90  unsigned e = Aliases.size();
91  Iters.resize(e);
92  for (unsigned i = 0; i != e; ++i)
93    Iters[i].setMap(Aliases[i].first->getMap());
94}
95
96bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
97                                     const TargetRegisterInfo *TRI) {
98  unsigned i = 0, e = Aliases.size();
99  for (const unsigned *AS = TRI->getOverlaps(PhysReg); *AS; ++AS, ++i) {
100    LiveIntervalUnion *LIU = LIUArray + *AS;
101    if (i == e ||  Aliases[i].first != LIU)
102      return false;
103    if (LIU->changedSince(Aliases[i].second))
104      return false;
105  }
106  return i == e;
107}
108
109// Test if a register mask clobbers PhysReg.
110static inline bool maskClobber(const uint32_t *Mask, unsigned PhysReg) {
111  return !(Mask[PhysReg/32] & (1u << PhysReg%32));
112}
113
/// update - Recompute Blocks[MBBNum], the cached interference summary for the
/// given basic block. If the block turns out to be interference-free, keep
/// scanning forward through layout-successor blocks, caching their results
/// too, until a block with interference (or an already-current cache entry)
/// is found.
void InterferenceCache::Entry::update(unsigned MBBNum) {
  SlotIndex Start, Stop;
  tie(Start, Stop) = Indexes->getMBBRange(MBBNum);

  // Use advanceTo only when possible.
  if (PrevPos != Start) {
    // Iterators only move forward; a query before PrevPos forces a full
    // find() on every alias iterator.
    if (!PrevPos.isValid() || Start < PrevPos)
      for (unsigned i = 0, e = Iters.size(); i != e; ++i)
        Iters[i].find(Start);
    else
      for (unsigned i = 0, e = Iters.size(); i != e; ++i)
        Iters[i].advanceTo(Start);
    PrevPos = Start;
  }

  MachineFunction::const_iterator MFI = MF->getBlockNumbered(MBBNum);
  BlockInterference *BI = &Blocks[MBBNum];
  ArrayRef<SlotIndex> RegMaskSlots;
  ArrayRef<const uint32_t*> RegMaskBits;
  for (;;) {
    // Mark this block's slot as freshly computed under the current Tag.
    BI->Tag = Tag;
    BI->First = BI->Last = SlotIndex();

    // Check for first interference.
    // BI->First becomes the earliest segment start, across all alias
    // unions, that falls before the end of this block.
    for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
      Iter &I = Iters[i];
      if (!I.valid())
        continue;
      SlotIndex StartI = I.start();
      if (StartI >= Stop)
        continue;
      if (!BI->First.isValid() || StartI < BI->First)
        BI->First = StartI;
    }

    // Also check for register mask interference.
    // A regmask clobbering PhysReg before the first live-range interference
    // moves BI->First earlier; only slots before Limit can matter.
    RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
    RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
    SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
    for (unsigned i = 0, e = RegMaskSlots.size();
         i != e && RegMaskSlots[i] < Limit; ++i)
      if (maskClobber(RegMaskBits[i], PhysReg)) {
        // Register mask i clobbers PhysReg before the LIU interference.
        BI->First = RegMaskSlots[i];
        break;
      }

    // The iterators have been advanced through this block; record that so a
    // later call starting at Stop can use advanceTo.
    PrevPos = Stop;
    if (BI->First.isValid())
      break;

    // No interference in this block? Go ahead and precompute the next block.
    if (++MFI == MF->end())
      return;
    MBBNum = MFI->getNumber();
    BI = &Blocks[MBBNum];
    if (BI->Tag == Tag)
      return;
    tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
  }

  // Check for last interference in block.
  // Advance each iterator past Stop, then back up one segment when the
  // advance overshot, so I points at the last segment overlapping this
  // block; take the latest stop seen.
  for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
    Iter &I = Iters[i];
    if (!I.valid() || I.start() >= Stop)
      continue;
    I.advanceTo(Stop);
    bool Backup = !I.valid() || I.start() >= Stop;
    if (Backup)
      --I;
    SlotIndex StopI = I.stop();
    if (!BI->Last.isValid() || StopI > BI->Last)
      BI->Last = StopI;
    if (Backup)
      ++I;  // Restore the iterator so it stays consistent with PrevPos.
  }

  // Also check for register mask interference.
  // Scan regmask slots backwards from the end of the block; a clobber after
  // the last live-range interference extends BI->Last.
  SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
  for (unsigned i = RegMaskSlots.size(); i && RegMaskSlots[i-1] > Limit; --i)
    if (maskClobber(RegMaskBits[i-1], PhysReg)) {
      // Register mask i-1 clobbers PhysReg after the LIU interference.
      // Model the regmask clobber as a dead def.
      BI->Last = RegMaskSlots[i-1].getDeadSlot();
      break;
    }
}
