// X86VZeroUpper.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
1//===-- X86VZeroUpper.cpp - AVX vzeroupper instruction inserter -----------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the pass which inserts x86 AVX vzeroupper instructions
11// before calls to SSE encoded functions. This avoids transition latency
// penalty when transferring control between AVX encoded instructions and old
13// SSE encoding mode.
14//
15//===----------------------------------------------------------------------===//
16
17#define DEBUG_TYPE "x86-vzeroupper"
18#include "X86.h"
19#include "X86InstrInfo.h"
20#include "X86Subtarget.h"
21#include "llvm/ADT/Statistic.h"
22#include "llvm/CodeGen/MachineFunctionPass.h"
23#include "llvm/CodeGen/MachineInstrBuilder.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/Passes.h"
26#include "llvm/Support/Debug.h"
27#include "llvm/Support/raw_ostream.h"
28#include "llvm/Target/TargetInstrInfo.h"
29using namespace llvm;
30
// Pass statistic: counts vzeroupper instructions inserted (shown by -stats).
STATISTIC(NumVZU, "Number of vzeroupper instructions inserted");
32
namespace {

  // Machine-function pass that inserts VZEROUPPER before calls/returns that
  // may execute SSE code while the upper halves of YMM registers are dirty,
  // avoiding the AVX<->SSE transition penalty described in the file header.
  class VZeroUpperInserter : public MachineFunctionPass {
  public:

    VZeroUpperInserter() : MachineFunctionPass(ID) {}
    bool runOnMachineFunction(MachineFunction &MF) override;
    const char *getPassName() const override {return "X86 vzeroupper inserter";}

  private:

    // Compute the exit state of MBB and record its first unguarded call
    // (see BlockState below); seeds DirtySuccessors for dirty-exiting blocks.
    void processBasicBlock(MachineBasicBlock &MBB);
    // Insert a VZEROUPPER instruction immediately before I in MBB.
    void insertVZeroUpper(MachineBasicBlock::iterator I,
                          MachineBasicBlock &MBB);
    // Add MBB to the DirtySuccessors worklist unless already added.
    void addDirtySuccessor(MachineBasicBlock &MBB);

    typedef enum { PASS_THROUGH, EXITS_CLEAN, EXITS_DIRTY } BlockExitState;
    static const char* getBlockExitStateName(BlockExitState ST);

    // Core algorithm state:
    // BlockState - Each block is either:
    //   - PASS_THROUGH: There are neither YMM dirtying instructions nor
    //                   vzeroupper instructions in this block.
    //   - EXITS_CLEAN: There is (or will be) a vzeroupper instruction in this
    //                  block that will ensure that YMM is clean on exit.
    //   - EXITS_DIRTY: An instruction in the block dirties YMM and no
    //                  subsequent vzeroupper in the block clears it.
    //
    // AddedToDirtySuccessors - This flag is raised when a block is added to the
    //                          DirtySuccessors list to ensure that it's not
    //                          added multiple times.
    //
    // FirstUnguardedCall - Records the location of the first unguarded call in
    //                      each basic block that may need to be guarded by a
    //                      vzeroupper. We won't know whether it actually needs
    //                      to be guarded until we discover a predecessor that
    //                      is EXITS_DIRTY.
    struct BlockState {
      BlockState() : ExitState(PASS_THROUGH), AddedToDirtySuccessors(false) {}
      BlockExitState ExitState;
      bool AddedToDirtySuccessors;
      MachineBasicBlock::iterator FirstUnguardedCall;
    };
    typedef SmallVector<BlockState, 8> BlockStateMap;
    typedef SmallVector<MachineBasicBlock*, 8> DirtySuccessorsWorkList;

    // Per-block state, indexed by MachineBasicBlock number.
    BlockStateMap BlockStates;
    // Worklist of blocks that may be entered with dirty YMM state.
    DirtySuccessorsWorkList DirtySuccessors;
    // True once any vzeroupper has been inserted into this function.
    bool EverMadeChange;
    const TargetInstrInfo *TII;

    static char ID;
  };

  char VZeroUpperInserter::ID = 0;
}
89
90FunctionPass *llvm::createX86IssueVZeroUpperPass() {
91  return new VZeroUpperInserter();
92}
93
94const char* VZeroUpperInserter::getBlockExitStateName(BlockExitState ST) {
95  switch (ST) {
96    case PASS_THROUGH: return "Pass-through";
97    case EXITS_DIRTY: return "Exits-dirty";
98    case EXITS_CLEAN: return "Exits-clean";
99  }
100  llvm_unreachable("Invalid block exit state.");
101}
102
103static bool isYmmReg(unsigned Reg) {
104  return (Reg >= X86::YMM0 && Reg <= X86::YMM15);
105}
106
107static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) {
108  for (MachineRegisterInfo::livein_iterator I = MRI.livein_begin(),
109       E = MRI.livein_end(); I != E; ++I)
110    if (isYmmReg(I->first))
111      return true;
112
113  return false;
114}
115
116static bool clobbersAllYmmRegs(const MachineOperand &MO) {
117  for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) {
118    if (!MO.clobbersPhysReg(reg))
119      return false;
120  }
121  return true;
122}
123
124static bool hasYmmReg(MachineInstr *MI) {
125  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
126    const MachineOperand &MO = MI->getOperand(i);
127    if (MI->isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
128      return true;
129    if (!MO.isReg())
130      continue;
131    if (MO.isDebug())
132      continue;
133    if (isYmmReg(MO.getReg()))
134      return true;
135  }
136  return false;
137}
138
139/// clobbersAnyYmmReg() - Check if any YMM register will be clobbered by this
140/// instruction.
141static bool callClobbersAnyYmmReg(MachineInstr *MI) {
142  assert(MI->isCall() && "Can only be called on call instructions.");
143  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
144    const MachineOperand &MO = MI->getOperand(i);
145    if (!MO.isRegMask())
146      continue;
147    for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) {
148      if (MO.clobbersPhysReg(reg))
149        return true;
150    }
151  }
152  return false;
153}
154
155// Insert a vzeroupper instruction before I.
156void VZeroUpperInserter::insertVZeroUpper(MachineBasicBlock::iterator I,
157                                              MachineBasicBlock &MBB) {
158  DebugLoc dl = I->getDebugLoc();
159  BuildMI(MBB, I, dl, TII->get(X86::VZEROUPPER));
160  ++NumVZU;
161  EverMadeChange = true;
162}
163
164// Add MBB to the DirtySuccessors list if it hasn't already been added.
165void VZeroUpperInserter::addDirtySuccessor(MachineBasicBlock &MBB) {
166  if (!BlockStates[MBB.getNumber()].AddedToDirtySuccessors) {
167    DirtySuccessors.push_back(&MBB);
168    BlockStates[MBB.getNumber()].AddedToDirtySuccessors = true;
169  }
170}
171
/// processBasicBlock - Loop over all of the instructions in the basic block,
/// inserting vzero upper instructions before function calls.  Computes the
/// block's exit state, records the first unguarded call (if any), and adds
/// the successors of dirty-exiting blocks to the DirtySuccessors worklist.
void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {

  // Start by assuming that the block is PASS_THROUGH, which implies no
  // unguarded calls.
  BlockExitState CurState = PASS_THROUGH;
  BlockStates[MBB.getNumber()].FirstUnguardedCall = MBB.end();

  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
    MachineInstr *MI = I;
    bool isControlFlow = MI->isCall() || MI->isReturn();

    // Shortcut: don't need to check regular instructions in dirty state --
    // once dirty, only a call/return can change what we must do.
    if (!isControlFlow && CurState == EXITS_DIRTY)
      continue;

    if (hasYmmReg(MI)) {
      // We found a ymm-using instruction; this could be an AVX instruction,
      // or it could be control flow (a call preserving some YMM register).
      CurState = EXITS_DIRTY;
      continue;
    }

    // Check for control-flow out of the current function (which might
    // indirectly execute SSE instructions).
    if (!isControlFlow)
      continue;

    // If the call won't clobber any YMM register, skip it as well. It usually
    // happens on helper function calls (such as '_chkstk', '_ftol2') where
    // standard calling convention is not used (RegMask is not used to mark
    // register clobbered and register usage (def/imp-def/use) is well-defined
    // and explicitly specified).
    if (MI->isCall() && !callClobbersAnyYmmReg(MI))
      continue;

    // The VZEROUPPER instruction resets the upper 128 bits of all Intel AVX
    // registers. This instruction has zero latency. In addition, the processor
    // changes back to Clean state, after which execution of Intel SSE
    // instructions or Intel AVX instructions has no transition penalty. Add
    // the VZEROUPPER instruction before any function call/return that might
    // execute SSE code.
    // FIXME: In some cases, we may want to move the VZEROUPPER into a
    // predecessor block.
    if (CurState == EXITS_DIRTY) {
      // After the inserted VZEROUPPER the state becomes clean again, but
      // other YMM may appear before other subsequent calls or even before
      // the end of the BB.
      insertVZeroUpper(I, MBB);
      CurState = EXITS_CLEAN;
    } else if (CurState == PASS_THROUGH) {
      // If this block is currently in pass-through state and we encounter a
      // call then whether we need a vzeroupper or not depends on whether this
      // block has successors that exit dirty. Record the location of the call,
      // and set the state to EXITS_CLEAN, but do not insert the vzeroupper yet.
      // It will be inserted later if necessary.
      BlockStates[MBB.getNumber()].FirstUnguardedCall = I;
      CurState = EXITS_CLEAN;
    }
  }

  DEBUG(dbgs() << "MBB #" << MBB.getNumber() << " exit state: "
               << getBlockExitStateName(CurState) << '\n');

  // A block exiting dirty may dirty its successors; queue them so their
  // first unguarded calls can be guarded in the second phase.
  if (CurState == EXITS_DIRTY)
    for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
                                          SE = MBB.succ_end();
         SI != SE; ++SI)
      addDirtySuccessor(**SI);

  BlockStates[MBB.getNumber()].ExitState = CurState;
}
245
246/// runOnMachineFunction - Loop over all of the basic blocks, inserting
247/// vzero upper instructions before function calls.
248bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
249  if (MF.getTarget().getSubtarget<X86Subtarget>().hasAVX512())
250    return false;
251  TII = MF.getTarget().getInstrInfo();
252  MachineRegisterInfo &MRI = MF.getRegInfo();
253  EverMadeChange = false;
254
255  // Fast check: if the function doesn't use any ymm registers, we don't need
256  // to insert any VZEROUPPER instructions.  This is constant-time, so it is
257  // cheap in the common case of no ymm use.
258  bool YMMUsed = false;
259  const TargetRegisterClass *RC = &X86::VR256RegClass;
260  for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
261       i != e; i++) {
262    if (!MRI.reg_nodbg_empty(*i)) {
263      YMMUsed = true;
264      break;
265    }
266  }
267  if (!YMMUsed) {
268    return false;
269  }
270
271  assert(BlockStates.empty() && DirtySuccessors.empty() &&
272         "X86VZeroUpper state should be clear");
273  BlockStates.resize(MF.getNumBlockIDs());
274
275  // Process all blocks. This will compute block exit states, record the first
276  // unguarded call in each block, and add successors of dirty blocks to the
277  // DirtySuccessors list.
278  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
279    processBasicBlock(*I);
280
281  // If any YMM regs are live in to this function, add the entry block to the
282  // DirtySuccessors list
283  if (checkFnHasLiveInYmm(MRI))
284    addDirtySuccessor(MF.front());
285
286  // Re-visit all blocks that are successors of EXITS_DIRTY bsocks. Add
287  // vzeroupper instructions to unguarded calls, and propagate EXITS_DIRTY
288  // through PASS_THROUGH blocks.
289  while (!DirtySuccessors.empty()) {
290    MachineBasicBlock &MBB = *DirtySuccessors.back();
291    DirtySuccessors.pop_back();
292    BlockState &BBState = BlockStates[MBB.getNumber()];
293
294    // MBB is a successor of a dirty block, so its first call needs to be
295    // guarded.
296    if (BBState.FirstUnguardedCall != MBB.end())
297      insertVZeroUpper(BBState.FirstUnguardedCall, MBB);
298
299    // If this successor was a pass-through block then it is now dirty, and its
300    // successors need to be added to the worklist (if they haven't been
301    // already).
302    if (BBState.ExitState == PASS_THROUGH) {
303      DEBUG(dbgs() << "MBB #" << MBB.getNumber()
304                   << " was Pass-through, is now Dirty-out.\n");
305      for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
306                                            SE = MBB.succ_end();
307           SI != SE; ++SI)
308        addDirtySuccessor(**SI);
309    }
310  }
311
312  BlockStates.clear();
313  return EverMadeChange;
314}
315