//===-- MipsSEInstrInfo.cpp - Mips32/64 Instruction Information -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEInstrInfo.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

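// The unconditional branch opcode passed down to MipsInstrInfo is the
// PC-relative B when generating PIC code and the absolute-target J otherwise.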
MipsSEInstrInfo::MipsSEInstrInfo(MipsTargetMachine &tm)
  : MipsInstrInfo(tm,
                  tm.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J),
    RI(*tm.getSubtargetImpl()),
    IsN64(tm.getSubtarget<MipsSubtarget>().isABI_N64()) {}

const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
  return RI;
}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot.  If
/// not, return 0.  This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned MipsSEInstrInfo::
isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
{
  unsigned Opc = MI->getOpcode();

  if ((Opc == Mips::LW)   || (Opc == Mips::LD)   ||
      (Opc == Mips::LWC1) || (Opc == Mips::LDC1) || (Opc == Mips::LDC164)) {
    if ((MI->getOperand(1).isFI()) && // is a stack slot
        (MI->getOperand(2).isImm()) && // the imm is zero
        (isZeroImm(MI->getOperand(2)))) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }

  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot.  If
/// not, return 0.  This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned MipsSEInstrInfo::
isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
{
  unsigned Opc = MI->getOpcode();

  if ((Opc == Mips::SW)   || (Opc == Mips::SD)   ||
      (Opc == Mips::SWC1) || (Opc == Mips::SDC1) || (Opc == Mips::SDC164)) {
    if ((MI->getOperand(1).isFI()) && // is a stack slot
        (MI->getOperand(2).isImm()) && // the imm is zero
        (isZeroImm(MI->getOperand(2)))) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
  }
  return 0;
}

void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I, DebugLoc DL,
                                  unsigned DestReg, unsigned SrcReg,
                                  bool KillSrc) const {
  unsigned Opc = 0, ZeroReg = 0;
  bool isMicroMips = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode();

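  // Pick an opcode from the destination and source register classes. A
  // SrcReg/DestReg left as 0 means that operand is implicit in the chosen
  // instruction (the HI/LO moves); ZeroReg, when set, supplies the $zero
  // operand used to implement a GPR-to-GPR move with ADDu/DADDu.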
  if (Mips::GPR32RegClass.contains(DestReg)) { // Copy to CPU Reg.
    if (Mips::GPR32RegClass.contains(SrcReg)) {
      if (isMicroMips)
        Opc = Mips::MOVE16_MM;
      else
        Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
    } else if (Mips::CCRRegClass.contains(SrcReg))
      Opc = Mips::CFC1;
    else if (Mips::FGR32RegClass.contains(SrcReg))
      Opc = Mips::MFC1;
    else if (Mips::HI32RegClass.contains(SrcReg)) {
      Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
      SrcReg = 0;
    } else if (Mips::LO32RegClass.contains(SrcReg)) {
      Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO;
      SrcReg = 0;
    } else if (Mips::HI32DSPRegClass.contains(SrcReg))
      Opc = Mips::MFHI_DSP;
    else if (Mips::LO32DSPRegClass.contains(SrcReg))
      Opc = Mips::MFLO_DSP;
    else if (Mips::DSPCCRegClass.contains(SrcReg)) {
      BuildMI(MBB, I, DL, get(Mips::RDDSP), DestReg).addImm(1 << 4)
        .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      return;
    }
    else if (Mips::MSACtrlRegClass.contains(SrcReg))
      Opc = Mips::CFCMSA;
  }
  else if (Mips::GPR32RegClass.contains(SrcReg)) { // Copy from CPU Reg.
    if (Mips::CCRRegClass.contains(DestReg))
      Opc = Mips::CTC1;
    else if (Mips::FGR32RegClass.contains(DestReg))
      Opc = Mips::MTC1;
    else if (Mips::HI32RegClass.contains(DestReg))
      Opc = Mips::MTHI, DestReg = 0;
    else if (Mips::LO32RegClass.contains(DestReg))
      Opc = Mips::MTLO, DestReg = 0;
    else if (Mips::HI32DSPRegClass.contains(DestReg))
      Opc = Mips::MTHI_DSP;
    else if (Mips::LO32DSPRegClass.contains(DestReg))
      Opc = Mips::MTLO_DSP;
    else if (Mips::DSPCCRegClass.contains(DestReg)) {
      BuildMI(MBB, I, DL, get(Mips::WRDSP))
        .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1 << 4)
        .addReg(DestReg, RegState::ImplicitDefine);
      return;
    }
    else if (Mips::MSACtrlRegClass.contains(DestReg))
      Opc = Mips::CTCMSA;
  }
  else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_S;
  else if (Mips::AFGR64RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_D32;
  else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_D64;
  else if (Mips::GPR64RegClass.contains(DestReg)) { // Copy to CPU64 Reg.
    if (Mips::GPR64RegClass.contains(SrcReg))
      Opc = Mips::DADDu, ZeroReg = Mips::ZERO_64;
    else if (Mips::HI64RegClass.contains(SrcReg))
      Opc = Mips::MFHI64, SrcReg = 0;
    else if (Mips::LO64RegClass.contains(SrcReg))
      Opc = Mips::MFLO64, SrcReg = 0;
    else if (Mips::FGR64RegClass.contains(SrcReg))
      Opc = Mips::DMFC1;
  }
  else if (Mips::GPR64RegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
    if (Mips::HI64RegClass.contains(DestReg))
      Opc = Mips::MTHI64, DestReg = 0;
    else if (Mips::LO64RegClass.contains(DestReg))
      Opc = Mips::MTLO64, DestReg = 0;
    else if (Mips::FGR64RegClass.contains(DestReg))
      Opc = Mips::DMTC1;
  }
  else if (Mips::MSA128BRegClass.contains(DestReg)) { // Copy to MSA reg
    if (Mips::MSA128BRegClass.contains(SrcReg))
      Opc = Mips::MOVE_V;
  }

  assert(Opc && "Cannot copy registers");

  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));

  if (DestReg)
    MIB.addReg(DestReg, RegState::Define);

  if (SrcReg)
    MIB.addReg(SrcReg, getKillRegState(KillSrc));

  if (ZeroReg)
    MIB.addReg(ZeroReg);
}

void MipsSEInstrInfo::
storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                unsigned SrcReg, bool isKill, int FI,
                const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
                int64_t Offset) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);

  unsigned Opc = 0;

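  // The store opcode follows the register class being spilled; the 128-bit
  // MSA classes are matched by value type since the element size selects the
  // ST_* instruction.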
  if (Mips::GPR32RegClass.hasSubClassEq(RC))
    Opc = Mips::SW;
  else if (Mips::GPR64RegClass.hasSubClassEq(RC))
    Opc = Mips::SD;
  else if (Mips::ACC64RegClass.hasSubClassEq(RC))
    Opc = Mips::STORE_ACC64;
  else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
    Opc = Mips::STORE_ACC64DSP;
  else if (Mips::ACC128RegClass.hasSubClassEq(RC))
    Opc = Mips::STORE_ACC128;
  else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
    Opc = Mips::STORE_CCOND_DSP;
  else if (Mips::FGR32RegClass.hasSubClassEq(RC))
    Opc = Mips::SWC1;
  else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
    Opc = Mips::SDC1;
  else if (Mips::FGR64RegClass.hasSubClassEq(RC))
    Opc = Mips::SDC164;
  else if (RC->hasType(MVT::v16i8))
    Opc = Mips::ST_B;
  else if (RC->hasType(MVT::v8i16) || RC->hasType(MVT::v8f16))
    Opc = Mips::ST_H;
  else if (RC->hasType(MVT::v4i32) || RC->hasType(MVT::v4f32))
    Opc = Mips::ST_W;
  else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
    Opc = Mips::ST_D;

  assert(Opc && "Register class not handled!");
  BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
    .addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
}

void MipsSEInstrInfo::
loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                 unsigned DestReg, int FI, const TargetRegisterClass *RC,
                 const TargetRegisterInfo *TRI, int64_t Offset) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
  unsigned Opc = 0;

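  // Opcode selection mirrors storeRegToStack: core classes by register class,
  // MSA vectors by value type.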
  if (Mips::GPR32RegClass.hasSubClassEq(RC))
    Opc = Mips::LW;
  else if (Mips::GPR64RegClass.hasSubClassEq(RC))
    Opc = Mips::LD;
  else if (Mips::ACC64RegClass.hasSubClassEq(RC))
    Opc = Mips::LOAD_ACC64;
  else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
    Opc = Mips::LOAD_ACC64DSP;
  else if (Mips::ACC128RegClass.hasSubClassEq(RC))
    Opc = Mips::LOAD_ACC128;
  else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
    Opc = Mips::LOAD_CCOND_DSP;
  else if (Mips::FGR32RegClass.hasSubClassEq(RC))
    Opc = Mips::LWC1;
  else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
    Opc = Mips::LDC1;
  else if (Mips::FGR64RegClass.hasSubClassEq(RC))
    Opc = Mips::LDC164;
  else if (RC->hasType(MVT::v16i8))
    Opc = Mips::LD_B;
  else if (RC->hasType(MVT::v8i16) || RC->hasType(MVT::v8f16))
    Opc = Mips::LD_H;
  else if (RC->hasType(MVT::v4i32) || RC->hasType(MVT::v4f32))
    Opc = Mips::LD_W;
  else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
    Opc = Mips::LD_D;

  assert(Opc && "Register class not handled!");
  BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(Offset)
    .addMemOperand(MMO);
}

bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();
  bool isMicroMips = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode();
  unsigned Opc;

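  // Expand post-register-allocation pseudos into real instructions. Opcodes
  // not handled here return false so the generic expansion can run; handled
  // ones fall through to erase the pseudo below.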
  switch(MI->getDesc().getOpcode()) {
  default:
    return false;
  case Mips::RetRA:
    expandRetRA(MBB, MI);
    break;
  case Mips::PseudoMFHI:
    Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
    expandPseudoMFHiLo(MBB, MI, Opc);
    break;
  case Mips::PseudoMFLO:
    Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO;
    expandPseudoMFHiLo(MBB, MI, Opc);
    break;
  case Mips::PseudoMFHI64:
    expandPseudoMFHiLo(MBB, MI, Mips::MFHI64);
    break;
  case Mips::PseudoMFLO64:
    expandPseudoMFHiLo(MBB, MI, Mips::MFLO64);
    break;
  case Mips::PseudoMTLOHI:
    expandPseudoMTLoHi(MBB, MI, Mips::MTLO, Mips::MTHI, false);
    break;
  case Mips::PseudoMTLOHI64:
    expandPseudoMTLoHi(MBB, MI, Mips::MTLO64, Mips::MTHI64, false);
    break;
  case Mips::PseudoMTLOHI_DSP:
    expandPseudoMTLoHi(MBB, MI, Mips::MTLO_DSP, Mips::MTHI_DSP, true);
    break;
  case Mips::PseudoCVT_S_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_S_W, Mips::MTC1, false);
    break;
  case Mips::PseudoCVT_D32_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_D32_W, Mips::MTC1, false);
    break;
  case Mips::PseudoCVT_S_L:
    expandCvtFPInt(MBB, MI, Mips::CVT_S_L, Mips::DMTC1, true);
    break;
  case Mips::PseudoCVT_D64_W:
    expandCvtFPInt(MBB, MI, Mips::CVT_D64_W, Mips::MTC1, true);
    break;
  case Mips::PseudoCVT_D64_L:
    expandCvtFPInt(MBB, MI, Mips::CVT_D64_L, Mips::DMTC1, true);
    break;
  case Mips::BuildPairF64:
    expandBuildPairF64(MBB, MI, false);
    break;
  case Mips::BuildPairF64_64:
    expandBuildPairF64(MBB, MI, true);
    break;
  case Mips::ExtractElementF64:
    expandExtractElementF64(MBB, MI, false);
    break;
  case Mips::ExtractElementF64_64:
    expandExtractElementF64(MBB, MI, true);
    break;
  case Mips::MIPSeh_return32:
  case Mips::MIPSeh_return64:
    expandEhReturn(MBB, MI);
    break;
  }

  MBB.erase(MI);
  return true;
}

/// getOppositeBranchOpc - Return the inverse of the specified
/// opcode, e.g. turning BEQ to BNE.
unsigned MipsSEInstrInfo::getOppositeBranchOpc(unsigned Opc) const {
  switch (Opc) {
  default:           llvm_unreachable("Illegal opcode!");
  case Mips::BEQ:    return Mips::BNE;
  case Mips::BNE:    return Mips::BEQ;
  case Mips::BGTZ:   return Mips::BLEZ;
  case Mips::BGEZ:   return Mips::BLTZ;
  case Mips::BLTZ:   return Mips::BGEZ;
  case Mips::BLEZ:   return Mips::BGTZ;
  case Mips::BEQ64:  return Mips::BNE64;
  case Mips::BNE64:  return Mips::BEQ64;
  case Mips::BGTZ64: return Mips::BLEZ64;
  case Mips::BGEZ64: return Mips::BLTZ64;
  case Mips::BLTZ64: return Mips::BGEZ64;
  case Mips::BLEZ64: return Mips::BGTZ64;
  case Mips::BC1T:   return Mips::BC1F;
  case Mips::BC1F:   return Mips::BC1T;
  }
}

/// Adjust SP by Amount bytes.
void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const {
  const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
  DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
  unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
  unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;

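  // ADDiu/DADDiu take a signed 16-bit immediate; larger adjustments are first
  // materialized into a scratch register and then added with ADDu/DADDu.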
  if (isInt<16>(Amount)) // addi sp, sp, amount
    BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount);
  else { // Expand immediate that doesn't fit in 16-bit.
    unsigned Reg = loadImmediate(Amount, MBB, I, DL, nullptr);
    BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(Reg, RegState::Kill);
  }
}

/// Materialize the immediate Imm into a newly created virtual register and
/// return that register. If NewImm is non-null, the final ADDiu of the
/// materialization sequence is not emitted; its immediate is returned through
/// NewImm so the caller can fold it (e.g. into a memory offset or a trailing
/// ADDiu of its own).
unsigned
MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator II, DebugLoc DL,
                               unsigned *NewImm) const {
  MipsAnalyzeImmediate AnalyzeImm;
  const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
  MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
  unsigned Size = STI.isABI_N64() ? 64 : 32;
  unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
  unsigned ZEROReg = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
  const TargetRegisterClass *RC = STI.isABI_N64() ?
    &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  bool LastInstrIsADDiu = NewImm;

  const MipsAnalyzeImmediate::InstSeq &Seq =
    AnalyzeImm.Analyze(Imm, Size, LastInstrIsADDiu);
  MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();

  assert(Seq.size() && (!LastInstrIsADDiu || (Seq.size() > 1)));

  // The first instruction can be a LUi, which is different from other
  // instructions (ADDiu, ORI and SLL) in that it does not have a register
  // operand.
  unsigned Reg = RegInfo.createVirtualRegister(RC);

  if (Inst->Opc == LUi)
    BuildMI(MBB, II, DL, get(LUi), Reg).addImm(SignExtend64<16>(Inst->ImmOpnd));
  else
    BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(ZEROReg)
      .addImm(SignExtend64<16>(Inst->ImmOpnd));

  // Build the remaining instructions in Seq.
  for (++Inst; Inst != Seq.end() - LastInstrIsADDiu; ++Inst)
    BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(Reg, RegState::Kill)
      .addImm(SignExtend64<16>(Inst->ImmOpnd));

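  // When NewImm was requested, the loop above stopped before the trailing
  // ADDiu of the sequence; hand its immediate back to the caller.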
  if (LastInstrIsADDiu)
    *NewImm = Inst->ImmOpnd;

  return Reg;
}

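/// Return Opc unchanged if it is a branch that the generic branch analysis in
/// MipsInstrInfo knows how to handle; otherwise return 0.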
unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
  return (Opc == Mips::BEQ    || Opc == Mips::BNE    || Opc == Mips::BGTZ   ||
          Opc == Mips::BGEZ   || Opc == Mips::BLTZ   || Opc == Mips::BLEZ   ||
          Opc == Mips::BEQ64  || Opc == Mips::BNE64  || Opc == Mips::BGTZ64 ||
          Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
          Opc == Mips::BC1T   || Opc == Mips::BC1F   || Opc == Mips::B      ||
          Opc == Mips::J) ?
         Opc : 0;
}

void MipsSEInstrInfo::expandRetRA(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) const {
  const auto &Subtarget = TM.getSubtarget<MipsSubtarget>();

  if (Subtarget.isGP64bit())
    BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn64))
        .addReg(Mips::RA_64);
  else
    BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn)).addReg(Mips::RA);
}

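/// Compare the sizes of the two register operands of the unary instruction
/// Opc and report (destination is larger, source is larger). expandCvtFPInt
/// uses this to decide which side of the conversion needs a subregister.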
std::pair<bool, bool>
MipsSEInstrInfo::compareOpndSize(unsigned Opc,
                                 const MachineFunction &MF) const {
  const MCInstrDesc &Desc = get(Opc);
  assert(Desc.NumOperands == 2 && "Unary instruction expected.");
  const MipsRegisterInfo *RI = &getRegisterInfo();
  unsigned DstRegSize = getRegClass(Desc, 0, RI, MF)->getSize();
  unsigned SrcRegSize = getRegClass(Desc, 1, RI, MF)->getSize();

  return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
}

void MipsSEInstrInfo::expandPseudoMFHiLo(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned NewOpc) const {
  BuildMI(MBB, I, I->getDebugLoc(), get(NewOpc), I->getOperand(0).getReg());
}

void MipsSEInstrInfo::expandPseudoMTLoHi(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned LoOpc,
                                         unsigned HiOpc,
                                         bool HasExplicitDef) const {
  // Expand
  //  lo_hi pseudomtlohi $gpr0, $gpr1
  // to these two instructions:
  //  mtlo $gpr0
  //  mthi $gpr1

  DebugLoc DL = I->getDebugLoc();
  const MachineOperand &SrcLo = I->getOperand(1), &SrcHi = I->getOperand(2);
  MachineInstrBuilder LoInst = BuildMI(MBB, I, DL, get(LoOpc));
  MachineInstrBuilder HiInst = BuildMI(MBB, I, DL, get(HiOpc));
  LoInst.addReg(SrcLo.getReg(), getKillRegState(SrcLo.isKill()));
  HiInst.addReg(SrcHi.getReg(), getKillRegState(SrcHi.isKill()));

  // Add lo/hi registers if the mtlo/hi instructions created have explicit
  // def registers.
  if (HasExplicitDef) {
    unsigned DstReg = I->getOperand(0).getReg();
    unsigned DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
    unsigned DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi);
    LoInst.addReg(DstLo, RegState::Define);
    HiInst.addReg(DstHi, RegState::Define);
  }
}

void MipsSEInstrInfo::expandCvtFPInt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     unsigned CvtOpc, unsigned MovOpc,
                                     bool IsI64) const {
  const MCInstrDesc &CvtDesc = get(CvtOpc), &MovDesc = get(MovOpc);
  const MachineOperand &Dst = I->getOperand(0), &Src = I->getOperand(1);
  unsigned DstReg = Dst.getReg(), SrcReg = Src.getReg(), TmpReg = DstReg;
  unsigned KillSrc = getKillRegState(Src.isKill());
  DebugLoc DL = I->getDebugLoc();
  bool DstIsLarger, SrcIsLarger;

  std::tie(DstIsLarger, SrcIsLarger) =
      compareOpndSize(CvtOpc, *MBB.getParent());

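  // MovOpc first transfers the GPR value into an FPR, then CvtOpc converts it.
  // If the conversion widens, the move writes the low subregister of the
  // destination and the conversion fills the full register; if it narrows,
  // the move fills the full register and the conversion writes the low
  // subregister.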
  if (DstIsLarger)
    TmpReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);

  if (SrcIsLarger)
    DstReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);

  BuildMI(MBB, I, DL, MovDesc, TmpReg).addReg(SrcReg, KillSrc);
  BuildMI(MBB, I, DL, CvtDesc, DstReg).addReg(TmpReg, RegState::Kill);
}

void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator I,
                                              bool FP64) const {
  unsigned DstReg = I->getOperand(0).getReg();
  unsigned SrcReg = I->getOperand(1).getReg();
  unsigned N = I->getOperand(2).getImm();
  DebugLoc dl = I->getDebugLoc();

  assert(N < 2 && "Invalid immediate");
  unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo;
  unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);

  if (SubIdx == Mips::sub_hi && FP64) {
    // FIXME: The .addReg(SrcReg, RegState::Implicit) is a white lie used to
    //        temporarily work around a widespread bug in the -mfp64 support.
    //        The problem is that none of the 32-bit fpu ops mention the fact
    //        that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
    //        requires a major overhaul of the FPU implementation which can't
    //        be done right now due to time constraints.
    //        MFHC1 is one of two instructions that are affected since they are
    //        the only instructions that don't read the lower 32-bits.
    //        We therefore pretend that it reads the bottom 32-bits to
    //        artificially create a dependency and prevent the scheduler
    //        changing the behaviour of the code.
    BuildMI(MBB, I, dl, get(Mips::MFHC1), DstReg).addReg(SubReg).addReg(
        SrcReg, RegState::Implicit);
  } else
    BuildMI(MBB, I, dl, get(Mips::MFC1), DstReg).addReg(SubReg);
}

void MipsSEInstrInfo::expandBuildPairF64(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         bool FP64) const {
  unsigned DstReg = I->getOperand(0).getReg();
  unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
  const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1);
  DebugLoc dl = I->getDebugLoc();
  const TargetRegisterInfo &TRI = getRegisterInfo();
  bool HasMTHC1 = TM.getSubtarget<MipsSubtarget>().hasMips32r2() ||
                  TM.getSubtarget<MipsSubtarget>().hasMips32r6();

  // When mthc1 is available, use:
  //   mtc1 Lo, $fp
  //   mthc1 Hi, $fp
  //
  // Otherwise, for FP64:
  //   spill + reload via ldc1
  // This has not been implemented since FP64 on MIPS32 and earlier is not
  // supported.
  //
  // Otherwise, for FP32:
  //   mtc1 Lo, $fp
  //   mtc1 Hi, $fp + 1

  BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_lo))
    .addReg(LoReg);

  if (HasMTHC1 || FP64) {
    assert(TM.getSubtarget<MipsSubtarget>().hasMips32r2() &&
           "MTHC1 requires MIPS32r2");

    // FIXME: The .addReg(DstReg) is a white lie used to temporarily work
    //        around a widespread bug in the -mfp64 support.
    //        The problem is that none of the 32-bit fpu ops mention the fact
    //        that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
    //        requires a major overhaul of the FPU implementation which can't
    //        be done right now due to time constraints.
    //        MTHC1 is one of two instructions that are affected since they are
    //        the only instructions that don't read the lower 32-bits.
    //        We therefore pretend that it reads the bottom 32-bits to
    //        artificially create a dependency and prevent the scheduler
    //        changing the behaviour of the code.
    BuildMI(MBB, I, dl, get(FP64 ? Mips::MTHC1_D64 : Mips::MTHC1_D32), DstReg)
        .addReg(DstReg)
        .addReg(HiReg);
  } else
    BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_hi))
      .addReg(HiReg);
}


void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const {
  // This pseudo instruction is generated as part of the lowering of
  // ISD::EH_RETURN. We convert it to a stack increment by OffsetReg and an
  // indirect jump to TargetReg.
  const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
  unsigned ADDU = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
  unsigned SP = STI.isGP64bit() ? Mips::SP_64 : Mips::SP;
  unsigned RA = STI.isGP64bit() ? Mips::RA_64 : Mips::RA;
  unsigned T9 = STI.isGP64bit() ? Mips::T9_64 : Mips::T9;
  unsigned ZERO = STI.isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
  unsigned OffsetReg = I->getOperand(0).getReg();
  unsigned TargetReg = I->getOperand(1).getReg();

  // addu $ra, $v0, $zero
  // addu $sp, $sp, $v1
  // jr   $ra (via RetRA)
  if (TM.getRelocationModel() == Reloc::PIC_)
    BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), T9)
        .addReg(TargetReg).addReg(ZERO);
  BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), RA)
      .addReg(TargetReg).addReg(ZERO);
  BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), SP)
      .addReg(SP).addReg(OffsetReg);
  expandRetRA(MBB, I);
}

const MipsInstrInfo *llvm::createMipsSEInstrInfo(MipsTargetMachine &TM) {
  return new MipsSEInstrInfo(TM);
}