SystemZInstrInfo.cpp revision e39a156b921f47a374f091b43205555ee90cd555
//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZTargetMachine.h"
#include "SystemZInstrBuilder.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

using namespace llvm;

// Return a mask with Count low bits set.
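// The double shift avoids shifting a 64-bit value by 64 (undefined
// behaviour) when Count is 64.  For example:
//   allOnes(0)  == 0
//   allOnes(12) == 0xfff
//   allOnes(64) == ~uint64_t(0)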
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}

SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(tm), TM(tm) {
}

// MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
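//
// For example (illustrative only): an L128 load of a 128-bit register pair
// from offset N becomes an LG of the high half from offset N followed by an
// LG of the low half from offset N + 8.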
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_high));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_low));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
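//
// A "simple" move here is one whose address operand is a bare frame index
// with a zero displacement and no index register -- e.g. (illustratively) a
// spill reload such as LG %reg, 0(%frame-slot).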
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  if ((MCID.TSFlags & Flag) &&
      MI->getOperand(1).isFI() &&
      MI->getOperand(2).getImm() == 0 &&
      MI->getOperand(3).getReg() == 0) {
    FrameIndex = MI->getOperand(1).getIndex();
    return MI->getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
  if (MI->getOpcode() != SystemZ::MVC ||
      !MI->getOperand(0).isFI() ||
      MI->getOperand(1).getImm() != 0 ||
      !MI->getOperand(3).isFI() ||
      MI->getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI->getOperand(2).getImm();
  unsigned FI1 = MI->getOperand(0).getIndex();
  unsigned FI2 = MI->getOperand(3).getIndex();
  if (MFI->getObjectSize(FI1) != Length ||
      MFI->getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (llvm::next(I) != MBB.end())
        llvm::next(I)->eraseFromParent();

      Cond.clear();
      FBB = 0;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
    return false;
  }

  return false;
}

unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

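// Reversing a branch condition means inverting its CC mask within the set of
// CC values that can actually occur, which is what Cond[0] (the "CC valid"
// mask) describes.  For example, under an integer-comparison CCValid mask,
// reversing CCMASK_CMP_EQ yields CCMASK_CMP_LT | CCMASK_CMP_GT, i.e. "not
// equal".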
bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}

unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have two components!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way conditional branch.  Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
                                      unsigned &SrcReg, unsigned &SrcReg2,
                                      int &Mask, int &Value) const {
  assert(MI->isCompare() && "Caller should have checked for a comparison");

  if (MI->getNumExplicitOperands() == 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isImm()) {
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI->getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}

// If Reg is a virtual register, return its definition, otherwise return null.
static MachineInstr *getDef(unsigned Reg,
                            const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return 0;
  return MRI->getUniqueVRegDef(Reg);
}

// Return true if MI is a shift of type Opcode by Imm bits.
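// Operand 2 is the register part of the shift count (0 when the amount is
// given purely by the immediate in operand 3), so this matches, for example,
// a shift by a constant 28 with no variable shift amount.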
static bool isShift(MachineInstr *MI, int Opcode, int64_t Imm) {
  return (MI->getOpcode() == Opcode &&
          !MI->getOperand(2).getReg() &&
          MI->getOperand(3).getImm() == Imm);
}

// If the destination of MI has no uses, delete it as dead.
static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
  if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
    MI->eraseFromParent();
}

// Compare compares SrcReg against zero.  Check whether SrcReg contains
// the result of an IPM sequence whose input CC survives until Compare,
// and whether Compare is therefore redundant.  Delete it and return
// true if so.
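//
// The recognized pattern is roughly (sketched; the precise operand forms are
// checked below):
//
//   IPM  %tmp              ; materialize CC in %tmp
//   SRL  %tmp, 28
//   RLL  %res, %tmp, 31
//   [LGFR %res64, %res]    ; optional sign extension
//   C...                   ; the compare of SrcReg against zero
//
// The shifted and rotated IPM result is a value whose signed comparison with
// zero sets CC back to the value IPM read, so if nothing clobbers CC in
// between, the compare is redundant.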
static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
                                  const MachineRegisterInfo *MRI,
                                  const TargetRegisterInfo *TRI) {
  MachineInstr *LGFR = 0;
  MachineInstr *RLL = getDef(SrcReg, MRI);
  if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
    LGFR = RLL;
    RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
  }
  if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
    return false;

  MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
  if (!SRL || !isShift(SRL, SystemZ::SRL, 28))
    return false;

  MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
  if (!IPM || IPM->getOpcode() != SystemZ::IPM)
    return false;

  // Check that there are no assignments to CC between the IPM and Compare.
  if (IPM->getParent() != Compare->getParent())
    return false;
  MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare;
  for (++MBBI; MBBI != MBBE; ++MBBI) {
    MachineInstr *MI = MBBI;
    if (MI->modifiesRegister(SystemZ::CC, TRI))
      return false;
  }

  Compare->eraseFromParent();
  if (LGFR)
    eraseIfDead(LGFR, MRI);
  eraseIfDead(RLL, MRI);
  eraseIfDead(SRL, MRI);
  eraseIfDead(IPM, MRI);

  return true;
}

bool
SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
                                       unsigned SrcReg, unsigned SrcReg2,
                                       int Mask, int Value,
                                       const MachineRegisterInfo *MRI) const {
  assert(!SrcReg2 && "Only optimizing constant comparisons so far");
  bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
  if (Value == 0 &&
      !IsLogical &&
      removeIPMBasedCompare(Compare, SrcReg, MRI, TM.getRegisterInfo()))
    return true;
  return false;
}

// If Opcode is a move that has a conditional variant, return that variant,
// otherwise return 0.
static unsigned getConditionalMove(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::LR:  return SystemZ::LOCR;
  case SystemZ::LGR: return SystemZ::LOCGR;
  default:           return 0;
  }
}

bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
  unsigned Opcode = MI->getOpcode();
  if (TM.getSubtargetImpl()->hasLoadStoreOnCond() &&
      getConditionalMove(Opcode))
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    const BranchProbability &Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI->getOpcode();
  if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
    if (unsigned CondOpcode = getConditionalMove(Opcode)) {
      MI->setDesc(get(CondOpcode));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
      return true;
    }
  }
  return false;
}

void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, DebugLoc DL,
                              unsigned DestReg, unsigned SrcReg,
                              bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves.  This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_high),
                RI.getSubReg(SrcReg, SystemZ::subreg_high), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_low),
                RI.getSubReg(SrcReg, SystemZ::subreg_low), KillSrc);
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LR;
  else if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void
SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}

void
SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {
  struct LogicOp {
    LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
    LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
      : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

    operator bool() const { return RegSize; }

    unsigned RegSize, ImmLSB, ImmSize;
  };
}

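// Map an AND-with-immediate opcode to a LogicOp giving the register size it
// operates on, the LSB-numbered bit position of the immediate field within
// that register, and the width of the immediate.  For example, NILH ANDs
// bits 16-31 of a 64-bit register with a 16-bit immediate and leaves the
// remaining bits unchanged.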
static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILL32: return LogicOp(32,  0, 16);
  case SystemZ::NILH32: return LogicOp(32, 16, 16);
  case SystemZ::NILL:   return LogicOp(64,  0, 16);
  case SystemZ::NILH:   return LogicOp(64, 16, 16);
  case SystemZ::NIHL:   return LogicOp(64, 32, 16);
  case SystemZ::NIHH:   return LogicOp(64, 48, 16);
  case SystemZ::NILF32: return LogicOp(32,  0, 32);
  case SystemZ::NILF:   return LogicOp(64,  0, 32);
  case SystemZ::NIHF:   return LogicOp(64, 32, 32);
  default:              return LogicOp();
  }
}

// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
                                                 MachineInstr *NewMI,
                                                 LiveVariables *LV) {
  if (LV) {
    unsigned NumOps = OldMI->getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = OldMI->getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
    }
  }
  return NewMI;
}

MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineBasicBlock *MBB = MI->getParent();

  unsigned Opcode = MI->getOpcode();
  unsigned NumOps = MI->getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
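  //
  // For example (illustrative), a two-address "SLL %r1, 2" whose source must
  // remain live can become the distinct-operands form "SLLK %r2, %r1, 2".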
  if (TM.getSubtargetImpl()->hasDistinctOps()) {
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      MachineOperand &Dest = MI->getOperand(0);
      MachineOperand &Src = MI->getOperand(1);
      MachineInstrBuilder MIB =
        BuildMI(*MBB, MBBI, MI->getDebugLoc(), get(ThreeOperandOpcode))
        .addOperand(Dest);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.addOperand(MI->getOperand(I));
      return finishConvertToThreeAddress(MI, MIB, LV);
    }
  }

  // Try to convert an AND into an RISBG-type instruction.
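  // RISBG selects a rotated bit range and, because the "zero remaining bits"
  // flag is set below (the + 128 added to End), clears everything outside
  // that range, which matches an AND whose mask is a single contiguous
  // (possibly wrapped) run of ones.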
  if (LogicOp And = interpretAndImmediate(Opcode)) {
    unsigned NewOpcode;
    if (And.RegSize == 64)
      NewOpcode = SystemZ::RISBG;
    else if (TM.getSubtargetImpl()->hasHighWord())
      NewOpcode = SystemZ::RISBLG32;
    else
      // We can't use RISBG for 32-bit operations because it clobbers the
      // high word of the destination too.
      NewOpcode = 0;
    if (NewOpcode) {
      uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
      // AND IMMEDIATE leaves the other bits of the register unchanged.
      Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
      unsigned Start, End;
      if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
        if (NewOpcode == SystemZ::RISBLG32) {
          Start &= 31;
          End &= 31;
        }
        MachineOperand &Dest = MI->getOperand(0);
        MachineOperand &Src = MI->getOperand(1);
        MachineInstrBuilder MIB =
          BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
          .addOperand(Dest).addReg(0)
          .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
          .addImm(Start).addImm(End + 128).addImm(0);
        return finishConvertToThreeAddress(MI, MIB, LV);
      }
    }
  }
  return 0;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);

  // Early exit for cases we don't care about.
  if (Ops.size() != 1)
    return 0;

  unsigned OpNum = Ops[0];
  assert(Size == MF.getRegInfo()
         .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
         "Invalid size combination");

  unsigned Opcode = MI->getOpcode();
  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
        .addImm(0).addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      unsigned Dest = MI->getOperand(0).getReg();
      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled.  Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy.  This means that we cannot use it if the
  // load or store is volatile.  It also means that the transformation is
  // not valid in cases where the two memories partially overlap; however,
  // that is not a problem here, because we know that one of the memories
  // is a full frame index.
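  //
  // For example (illustrative), if the result of "L %r1, 0(%r2)" is spilled
  // straight to a 4-byte frame slot, a single "MVC <slot>(4,%r15), 0(%r2)"
  // performs both the load and the store.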
  if (OpNum == 0 && MI->hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI->memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
          .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
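  //
  // For example (illustrative), "AGR %r1, %r2" with %r2 spilled can become
  // "AG %r1, <slot>(%r15)", reading the second operand directly from the
  // frame slot.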
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI->getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI->getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      return MIB;
    }
  }

  return 0;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  return 0;
}

bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  default:
    return false;
  }
}

uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
  if (MI->getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI->getDesc().getSize();
}

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI->getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal,
                             MI->getOperand(0).getImm(),
                             MI->getOperand(1).getImm(), &MI->getOperand(2));

  case SystemZ::BRCT:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CLIJ:
  case SystemZ::CLRJ:
    return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CLGIJ:
  case SystemZ::CLGRJ:
    return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

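// Return a version of Opcode that can be used with a displacement of Offset,
// or 0 if there is none.  For 128-bit moves the displacement of the second
// (low) half, Offset + 8, must also be in range.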
unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L:    return SystemZ::LT;
  case SystemZ::LY:   return SystemZ::LT;
  case SystemZ::LG:   return SystemZ::LTG;
  case SystemZ::LGF:  return SystemZ::LTGF;
  case SystemZ::LR:   return SystemZ::LTR;
  case SystemZ::LGFR: return SystemZ::LTGFR;
  case SystemZ::LGR:  return SystemZ::LTGR;
  case SystemZ::LER:  return SystemZ::LTEBR;
  case SystemZ::LDR:  return SystemZ::LTDBR;
  case SystemZ::LXR:  return SystemZ::LTXBR;
  default:            return 0;
  }
}

// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out.  Store the first set bit in LSB and
// the number of set bits in Length if so.
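//
// For example, Mask = 0x0ff0 gives First = 4 and Top = 0x100; Top is then a
// power of two (the (Top & -Top) == Top test below), so the ones are
// contiguous, with LSB = 4 and Length = 8.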
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
  unsigned First = findFirstSet(Mask);
  uint64_t Top = (Mask >> First) + 1;
  if ((Top & -Top) == Top) {
    LSB = First;
    Length = findFirstSet(Top);
    return true;
  }
  return false;
}

bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
  // Reject trivial all-zero masks.
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases.  Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isStringOfOnes(Mask, LSB, Length)) {
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases.  Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
    assert(LSB > 0 && "Bottom bit must be set");
    assert(LSB + Length < BitSize && "Top bit must be set");
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}

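// Return the fused compare-and-branch opcode that corresponds to compare
// opcode Opcode, or 0 if there is none.  MI, if given, supplies the compared
// immediate; the immediate forms (CIJ, CGIJ, CLIJ, CLGIJ) only have an 8-bit
// immediate field, so out-of-range constants are rejected.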
unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
                                               const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CR:
    return SystemZ::CRJ;
  case SystemZ::CGR:
    return SystemZ::CGRJ;
  case SystemZ::CHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
  case SystemZ::CGHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
  case SystemZ::CLR:
    return SystemZ::CLRJ;
  case SystemZ::CLGR:
    return SystemZ::CLGRJ;
  case SystemZ::CLFI:
    return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLIJ : 0;
  case SystemZ::CLGFI:
    return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLGIJ : 0;
  default:
    return 0;
  }
}

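// Emit a single instruction to load Value into Reg, picking the shortest
// suitable form: LGHI for signed 16-bit values, LLILL/LLILH for values with
// only one low halfword set, and otherwise LGFI for signed 32-bit values.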
952
953void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
954                                     MachineBasicBlock::iterator MBBI,
955                                     unsigned Reg, uint64_t Value) const {
956  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
957  unsigned Opcode;
958  if (isInt<16>(Value))
959    Opcode = SystemZ::LGHI;
960  else if (SystemZ::isImmLL(Value))
961    Opcode = SystemZ::LLILL;
962  else if (SystemZ::isImmLH(Value)) {
963    Opcode = SystemZ::LLILH;
964    Value >>= 16;
965  } else {
966    assert(isInt<32>(Value) && "Huge values not handled yet");
967    Opcode = SystemZ::LGFI;
968  }
969  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
970}