//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZInstrBuilder.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

// Return a mask with Count low bits set.
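// The shift is split into two steps so that Count == 64 stays well-defined;
// a single shift of a uint64_t by 64 would be undefined behaviour.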
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}

// Reg should be a 32-bit GPR.  Return true if it is a high register rather
// than a low register.
static bool isHighReg(unsigned int Reg) {
  if (SystemZ::GRH32BitRegClass.contains(Reg))
    return true;
  assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
  return false;
}

// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}

SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(), STI(sti) {
}

// MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

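  // Dynamically allocated objects sit above the register save area
  // (SystemZMC::CallFrameSize) and the outgoing call arguments, so bias the
  // requested offset by both.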
  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// MI is an RI-style pseudo instruction.  Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.  ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand.  In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
                                      unsigned HighOpcode,
                                      bool ConvertHigh) const {
  unsigned Reg = MI->getOperand(0).getReg();
  bool IsHigh = isHighReg(Reg);
  MI->setDesc(get(IsHigh ? HighOpcode : LowOpcode));
  if (IsHigh && ConvertHigh)
    MI->getOperand(1).setImm(uint32_t(MI->getOperand(1).getImm()));
}

// MI is a three-operand RIE-style pseudo instruction.  Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned SrcReg = MI->getOperand(1).getReg();
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    MI->setDesc(get(LowOpcodeK));
  else {
    emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
                  DestReg, SrcReg, SystemZ::LR, 32,
                  MI->getOperand(1).isKill());
    MI->setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
    MI->getOperand(1).setReg(DestReg);
  }
}

// MI is an RXY-style pseudo instruction.  Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  unsigned Reg = MI->getOperand(0).getReg();
  unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
                                       MI->getOperand(2).getImm());
  MI->setDesc(get(Opcode));
}

// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another.  Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
                                        unsigned Size) const {
  emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
                MI->getOperand(0).getReg(), MI->getOperand(1).getReg(),
                LowOpcode, Size, MI->getOperand(1).isKill());
  MI->eraseFromParent();
}

// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB.  Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G.  Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     DebugLoc DL, unsigned DestReg,
                                     unsigned SrcReg, unsigned LowLowOpcode,
                                     unsigned Size, bool KillSrc) const {
  unsigned Opcode;
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
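  // The RISB forms take a start bit (I3), an end bit (I4) and a rotate amount
  // (I5).  Keeping bits 32 - Size to 31 clears the extension bits, bit 0x80
  // of I4 requests that all untouched bits be zeroed, and the rotate is 32
  // whenever the move crosses between the low and high halves.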
  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(DestReg, RegState::Undef)
    .addReg(SrcReg, getKillRegState(KillSrc))
    .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  if ((MCID.TSFlags & Flag) &&
      MI->getOperand(1).isFI() &&
      MI->getOperand(2).getImm() == 0 &&
      MI->getOperand(3).getReg() == 0) {
    FrameIndex = MI->getOperand(1).getIndex();
    return MI->getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
  if (MI->getOpcode() != SystemZ::MVC ||
      !MI->getOperand(0).isFI() ||
      MI->getOperand(1).getImm() != 0 ||
      !MI->getOperand(3).isFI() ||
      MI->getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI->getOperand(2).getImm();
  unsigned FI1 = MI->getOperand(0).getIndex();
  unsigned FI2 = MI->getOperand(3).getIndex();
  if (MFI->getObjectSize(FI1) != Length ||
      MFI->getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (std::next(I) != MBB.end())
        std::next(I)->eraseFromParent();

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
    return false;
  }

  return false;
}

unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
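  // The branch only tests CC values that the compare can produce (CCValid),
  // so the condition is reversed by flipping CCMask within that set.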
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}

unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
                                      unsigned &SrcReg, unsigned &SrcReg2,
                                      int &Mask, int &Value) const {
  assert(MI->isCompare() && "Caller should have checked for a comparison");

  if (MI->getNumExplicitOperands() == 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isImm()) {
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI->getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}

// If Reg is a virtual register, return its definition, otherwise return null.
static MachineInstr *getDef(unsigned Reg,
                            const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return nullptr;
  return MRI->getUniqueVRegDef(Reg);
}

// Return true if MI is a shift of type Opcode by Imm bits.
static bool isShift(MachineInstr *MI, int Opcode, int64_t Imm) {
  return (MI->getOpcode() == Opcode &&
          !MI->getOperand(2).getReg() &&
          MI->getOperand(3).getImm() == Imm);
}

// If the destination of MI has no uses, delete it as dead.
static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
  if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
    MI->eraseFromParent();
}

// Compare compares SrcReg against zero.  Check whether SrcReg contains
// the result of an IPM sequence whose input CC survives until Compare,
// and whether Compare is therefore redundant.  Delete it and return
// true if so.
static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
                                  const MachineRegisterInfo *MRI,
                                  const TargetRegisterInfo *TRI) {
  MachineInstr *LGFR = nullptr;
  MachineInstr *RLL = getDef(SrcReg, MRI);
  if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
    LGFR = RLL;
    RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
  }
  if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
    return false;

  MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
  if (!SRL || !isShift(SRL, SystemZ::SRL, SystemZ::IPM_CC))
    return false;

  MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
  if (!IPM || IPM->getOpcode() != SystemZ::IPM)
    return false;

  // Check that there are no assignments to CC between the IPM and Compare.
  if (IPM->getParent() != Compare->getParent())
    return false;
  MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare;
  for (++MBBI; MBBI != MBBE; ++MBBI) {
    MachineInstr *MI = MBBI;
    if (MI->modifiesRegister(SystemZ::CC, TRI))
      return false;
  }

  Compare->eraseFromParent();
  if (LGFR)
    eraseIfDead(LGFR, MRI);
  eraseIfDead(RLL, MRI);
  eraseIfDead(SRL, MRI);
  eraseIfDead(IPM, MRI);

  return true;
}

bool
SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
                                       unsigned SrcReg, unsigned SrcReg2,
                                       int Mask, int Value,
                                       const MachineRegisterInfo *MRI) const {
  assert(!SrcReg2 && "Only optimizing constant comparisons so far");
  bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
  if (Value == 0 &&
      !IsLogical &&
      removeIPMBasedCompare(Compare, SrcReg, MRI, &RI))
    return true;
  return false;
}

// If Opcode is a move that has a conditional variant, return that variant,
// otherwise return 0.
static unsigned getConditionalMove(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::LR:  return SystemZ::LOCR;
  case SystemZ::LGR: return SystemZ::LOCGR;
  default:           return 0;
  }
}

bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
  unsigned Opcode = MI->getOpcode();
  if (STI.hasLoadStoreOnCond() &&
      getConditionalMove(Opcode))
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    const BranchProbability &Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI->getOpcode();
  if (STI.hasLoadStoreOnCond()) {
    if (unsigned CondOpcode = getConditionalMove(Opcode)) {
      MI->setDesc(get(CondOpcode));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
      return true;
    }
  }
  return false;
}

void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, DebugLoc DL,
                              unsigned DestReg, unsigned SrcReg,
                              bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves.  This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc);
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void
SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                      .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}

void
SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {
struct LogicOp {
  LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
    : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  explicit operator bool() const { return RegSize; }

  unsigned RegSize, ImmLSB, ImmSize;
};
} // end anonymous namespace

static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32,  0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64,  0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32,  0, 32);
  case SystemZ::NILF64: return LogicOp(64,  0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default:              return LogicOp();
  }
}

// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
                                                 MachineInstr *NewMI,
                                                 LiveVariables *LV) {
  if (LV) {
    unsigned NumOps = OldMI->getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = OldMI->getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
    }
  }
  return NewMI;
}

MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  unsigned Opcode = MI->getOpcode();
  unsigned NumOps = MI->getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
  if (STI.hasDistinctOps()) {
    MachineOperand &Dest = MI->getOperand(0);
    MachineOperand &Src = MI->getOperand(1);
    unsigned DestReg = Dest.getReg();
    unsigned SrcReg = Src.getReg();
    // AHIMux is only really a three-operand instruction when both operands
    // are low registers.  Try to constrain both operands to be low if
    // possible.
    if (Opcode == SystemZ::AHIMux &&
        TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg) &&
        MRI.getRegClass(DestReg)->contains(SystemZ::R1L) &&
        MRI.getRegClass(SrcReg)->contains(SystemZ::R1L)) {
      MRI.constrainRegClass(DestReg, &SystemZ::GR32BitRegClass);
      MRI.constrainRegClass(SrcReg, &SystemZ::GR32BitRegClass);
    }
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      MachineInstrBuilder MIB =
        BuildMI(*MBB, MBBI, MI->getDebugLoc(), get(ThreeOperandOpcode))
        .addOperand(Dest);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.addOperand(MI->getOperand(I));
      return finishConvertToThreeAddress(MI, MIB, LV);
    }
  }

  // Try to convert an AND into an RISBG-type instruction.
  if (LogicOp And = interpretAndImmediate(Opcode)) {
    uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
    // AND IMMEDIATE leaves the other bits of the register unchanged.
    Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
    unsigned Start, End;
    if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
      unsigned NewOpcode;
      if (And.RegSize == 64) {
        NewOpcode = SystemZ::RISBG;
        // Prefer RISBGN if available, since it does not clobber CC.
        if (STI.hasMiscellaneousExtensions())
          NewOpcode = SystemZ::RISBGN;
      } else {
        NewOpcode = SystemZ::RISBMux;
        Start &= 31;
        End &= 31;
      }
      MachineOperand &Dest = MI->getOperand(0);
      MachineOperand &Src = MI->getOperand(1);
      MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
        .addOperand(Dest).addReg(0)
        .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
        .addImm(Start).addImm(End + 128).addImm(0);
      return finishConvertToThreeAddress(MI, MIB, LV);
    }
  }
  return nullptr;
}

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                      MachineInstr *MI,
                                                      ArrayRef<unsigned> Ops,
                                                      int FrameIndex) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);
  unsigned Opcode = MI->getOpcode();

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if ((Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
        isInt<8>(MI->getOperand(2).getImm()) &&
        !MI->getOperand(3).getReg()) {
      // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
      return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI))
        .addFrameIndex(FrameIndex).addImm(0)
        .addImm(MI->getOperand(2).getImm());
    }
    return nullptr;
  }

  // All other cases require a single operand.
  if (Ops.size() != 1)
    return nullptr;

  unsigned OpNum = Ops[0];
  assert(Size == MF.getRegInfo()
         .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
         "Invalid size combination");

  if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) &&
      OpNum == 0 &&
      isInt<8>(MI->getOperand(2).getImm())) {
    // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
    return BuildMI(MF, MI->getDebugLoc(), get(Opcode))
      .addFrameIndex(FrameIndex).addImm(0)
      .addImm(MI->getOperand(2).getImm());
  }

  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
        .addImm(0).addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      unsigned Dest = MI->getOperand(0).getReg();
      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled.  Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy.  This means that we cannot use it if the
  // load or store is volatile.  We also wouldn't be able to use MVC if
  // the two memories partially overlap, but that case cannot occur here,
  // because we know that one of the memories is a full frame index.
  //
  // For performance reasons, we also want to avoid using MVC if the addresses
  // might be equal.  We don't worry about that case here, because spill slot
  // coloring happens later, and because we have special code to remove
  // MVCs that turn out to be redundant.
  if (OpNum == 0 && MI->hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI->memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
          .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI->getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
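      // The memory form may access fewer bytes than the slot holds; on
      // big-endian SystemZ the significant low-order bytes sit at the end of
      // the slot, so place the access at Size - AccessBytes.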
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI->getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      return MIB;
    }
  }

  return nullptr;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        ArrayRef<unsigned> Ops,
                                        MachineInstr *LoadMI) const {
  return nullptr;
}

bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::LBMux:
    expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
    return true;

  case SystemZ::LHMux:
    expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
    return true;

  case SystemZ::LLCRMux:
    expandZExtPseudo(MI, SystemZ::LLCR, 8);
    return true;

  case SystemZ::LLHRMux:
    expandZExtPseudo(MI, SystemZ::LLHR, 16);
    return true;

  case SystemZ::LLCMux:
    expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
    return true;

  case SystemZ::LLHMux:
    expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
    return true;

  case SystemZ::LMux:
    expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
    return true;

  case SystemZ::STCMux:
    expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
    return true;

  case SystemZ::STHMux:
    expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
    return true;

  case SystemZ::STMux:
    expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
    return true;

  case SystemZ::LHIMux:
    expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
    return true;

  case SystemZ::IIFMux:
    expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
    return true;

  case SystemZ::IILMux:
    expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
    return true;

  case SystemZ::IIHMux:
    expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
    return true;

  case SystemZ::NIFMux:
    expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
    return true;

  case SystemZ::NILMux:
    expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
    return true;

  case SystemZ::NIHMux:
    expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
    return true;

  case SystemZ::OIFMux:
    expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
    return true;

  case SystemZ::OILMux:
    expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
    return true;

  case SystemZ::OIHMux:
    expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
    return true;

  case SystemZ::XIFMux:
    expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
    return true;

  case SystemZ::TMLMux:
    expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
    return true;

  case SystemZ::TMHMux:
    expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
    return true;

  case SystemZ::AHIMux:
    expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
    return true;

  case SystemZ::AHIMuxK:
    expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
    return true;

  case SystemZ::AFIMux:
    expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
    return true;

  case SystemZ::CFIMux:
    expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
    return true;

  case SystemZ::CLFIMux:
    expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
    return true;

  case SystemZ::CMux:
    expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
    return true;

  case SystemZ::CLMux:
    expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
    return true;

  case SystemZ::RISBMux: {
    bool DestIsHigh = isHighReg(MI->getOperand(0).getReg());
    bool SrcIsHigh = isHighReg(MI->getOperand(2).getReg());
    if (SrcIsHigh == DestIsHigh)
      MI->setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
    else {
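      // When the move crosses between the low and high halves, the rotate
      // amount (operand 5) also has to be adjusted by 32.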
      MI->setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
      MI->getOperand(5).setImm(MI->getOperand(5).getImm() ^ 32);
    }
    return true;
  }

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  default:
    return false;
  }
}

uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
  if (MI->getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI->getDesc().getSize();
}

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI->getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal,
                             MI->getOperand(0).getImm(),
                             MI->getOperand(1).getImm(), &MI->getOperand(2));

  case SystemZ::BRCT:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CLIJ:
  case SystemZ::CLRJ:
    return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CLGIJ:
  case SystemZ::CLGRJ:
    return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST;
  } else if (RC == &SystemZ::GRH32BitRegClass) {
    LoadOpcode = SystemZ::LFH;
    StoreOpcode = SystemZ::STFH;
  } else if (RC == &SystemZ::GRX32BitRegClass) {
    LoadOpcode = SystemZ::LMux;
    StoreOpcode = SystemZ::STMux;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
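  // A 128-bit access is later split into two 64-bit halves (see splitMove),
  // so the displacement of the second half (Offset + 8) must be in range too.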
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L:      return SystemZ::LT;
  case SystemZ::LY:     return SystemZ::LT;
  case SystemZ::LG:     return SystemZ::LTG;
  case SystemZ::LGF:    return SystemZ::LTGF;
  case SystemZ::LR:     return SystemZ::LTR;
  case SystemZ::LGFR:   return SystemZ::LTGFR;
  case SystemZ::LGR:    return SystemZ::LTGR;
  case SystemZ::LER:    return SystemZ::LTEBR;
  case SystemZ::LDR:    return SystemZ::LTDBR;
  case SystemZ::LXR:    return SystemZ::LTXBR;
  // On zEC12 we prefer to use RISBGN.  But if there is a chance to
  // actually use the condition code, we may turn it back into RISBG.
  // Note that RISBG is not really a "load-and-test" instruction,
  // but sets the same condition code values, so is OK to use here.
  case SystemZ::RISBGN: return SystemZ::RISBG;
  default:              return 0;
  }
}

// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out.  Store the first set bit in LSB and
// the number of set bits in Length if so.
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
  unsigned First = findFirstSet(Mask);
  uint64_t Top = (Mask >> First) + 1;
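  // With the trailing zeros shifted out, adding 1 yields a power of two
  // exactly when the set bits are contiguous; (Top & -Top) == Top tests that.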
  if ((Top & -Top) == Top) {
    LSB = First;
    Length = findFirstSet(Top);
    return true;
  }
  return false;
}

bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
  // Reject trivial all-zero masks.
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases.  Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isStringOfOnes(Mask, LSB, Length)) {
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases.  Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
    assert(LSB > 0 && "Bottom bit must be set");
    assert(LSB + Length < BitSize && "Top bit must be set");
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}

unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
                                               const MachineInstr *MI) const {
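  // The fused compare-and-branch forms take an 8-bit immediate, hence the
  // isInt<8>/isUInt<8> range checks on the immediate comparisons below.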
  switch (Opcode) {
  case SystemZ::CR:
    return SystemZ::CRJ;
  case SystemZ::CGR:
    return SystemZ::CGRJ;
  case SystemZ::CHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
  case SystemZ::CGHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
  case SystemZ::CLR:
    return SystemZ::CLRJ;
  case SystemZ::CLGR:
    return SystemZ::CLGRJ;
  case SystemZ::CLFI:
    return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLIJ : 0;
  case SystemZ::CLGFI:
    return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLGIJ : 0;
  default:
    return 0;
  }
}

void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
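    // LLILH places the immediate in bits 16-31 of the low word, so the
    // operand is the value shifted down by 16.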
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}
1255