SPUInstrInfo.cpp revision c9c8b2a804b2cd3d33a6a965e06a21ff93968f97
1//===- SPUInstrInfo.cpp - Cell SPU Instruction Information ----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the Cell SPU implementation of the TargetInstrInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPURegisterNames.h"
15#include "SPUInstrInfo.h"
16#include "SPUInstrBuilder.h"
17#include "SPUTargetMachine.h"
18#include "SPUGenInstrInfo.inc"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/Support/Streams.h"
21#include "llvm/Support/Debug.h"
22
23using namespace llvm;
24
25namespace {
26  //! Predicate for an unconditional branch instruction
27  inline bool isUncondBranch(const MachineInstr *I) {
28    unsigned opc = I->getOpcode();
29
30    return (opc == SPU::BR
31	    || opc == SPU::BRA
32	    || opc == SPU::BI);
33  }
34
35  //! Predicate for a conditional branch instruction
36  inline bool isCondBranch(const MachineInstr *I) {
37    unsigned opc = I->getOpcode();
38
39    return (opc == SPU::BRNZr32
40            || opc == SPU::BRNZv4i32
41	    || opc == SPU::BRZr32
42	    || opc == SPU::BRZv4i32
43	    || opc == SPU::BRHNZr16
44	    || opc == SPU::BRHNZv8i16
45	    || opc == SPU::BRHZr16
46	    || opc == SPU::BRHZv8i16);
47  }
48}
49
//! Construct the CellSPU instruction info.  Hands the TableGen-generated
//! instruction descriptor table (SPUInsts, from SPUGenInstrInfo.inc) to the
//! TargetInstrInfoImpl base, and builds the register info from the subtarget.
SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
  : TargetInstrInfoImpl(SPUInsts, sizeof(SPUInsts)/sizeof(SPUInsts[0])),
    TM(tm),
    RI(*TM.getSubtargetImpl(), *this)
{ /* NOP */ }
55
56/// getPointerRegClass - Return the register class to use to hold pointers.
57/// This is used for addressing modes.
58const TargetRegisterClass *
59SPUInstrInfo::getPointerRegClass() const
60{
61  return &SPU::R32CRegClass;
62}
63
/// isMoveInstr - Recognize instructions that are really register-to-register
/// moves.  On success, returns true and fills in sourceReg/destReg (SrcSR and
/// DstSR are always 0: no sub-registers on SPU).
bool
SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
                          unsigned& sourceReg,
                          unsigned& destReg,
                          unsigned& SrcSR, unsigned& DstSR) const {
  SrcSR = DstSR = 0;  // No sub-registers.

  // Primarily, ORI and OR are generated by copyRegToReg. But, there are other
  // cases where we can safely say that what's being done is really a move
  // (see how PowerPC does this -- it's the model for this code too.)
  switch (MI.getOpcode()) {
  default:
    break;
  // reg-immediate forms: "dst = src OP 0" is a move when the immediate is 0.
  case SPU::ORIv4i32:
  case SPU::ORIr32:
  case SPU::ORHIv8i16:
  case SPU::ORHIr16:
  case SPU::ORHIi8i16:
  case SPU::ORBIv16i8:
  case SPU::ORBIr8:
  case SPU::ORIi16i32:
  case SPU::ORIi8i32:
  case SPU::AHIvec:
  case SPU::AHIr16:
  case SPU::AIv4i32:
    assert(MI.getNumOperands() == 3 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           MI.getOperand(2).isImm() &&
           "invalid SPU ORI/ORHI/ORBI/AHI/AI/SFI/SFHI instruction!");
    if (MI.getOperand(2).getImm() == 0) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  // AIr32 is handled separately: its operands are only checked (not
  // asserted) to be reg, reg, imm before testing for a zero immediate.
  case SPU::AIr32:
    assert(MI.getNumOperands() == 3 &&
           "wrong number of operands to AIr32");
    if (MI.getOperand(0).isReg() &&
        MI.getOperand(1).isReg() &&
        (MI.getOperand(2).isImm() &&
         MI.getOperand(2).getImm() == 0)) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  // Two-operand LR ("logical register move") and the OR<type>_<vec>
  // cross-class aliases.
  case SPU::LRr8:
  case SPU::LRr16:
  case SPU::LRr32:
  case SPU::LRf32:
  case SPU::LRr64:
  case SPU::LRf64:
  case SPU::LRr128:
  case SPU::LRv16i8:
  case SPU::LRv8i16:
  case SPU::LRv4i32:
  case SPU::LRv4f32:
  case SPU::LRv2i64:
  case SPU::LRv2f64:
  case SPU::ORv16i8_i8:
  case SPU::ORv8i16_i16:
  case SPU::ORv4i32_i32:
  case SPU::ORv2i64_i64:
  case SPU::ORv4f32_f32:
  case SPU::ORv2f64_f64:
  case SPU::ORi8_v16i8:
  case SPU::ORi16_v8i16:
  case SPU::ORi32_v4i32:
  case SPU::ORi64_v2i64:
  case SPU::ORf32_v4f32:
  case SPU::ORf64_v2f64:
/*
  case SPU::ORi128_r64:
  case SPU::ORi128_f64:
  case SPU::ORi128_r32:
  case SPU::ORi128_f32:
  case SPU::ORi128_r16:
  case SPU::ORi128_r8:
  case SPU::ORi128_vec:
  case SPU::ORr64_i128:
  case SPU::ORf64_i128:
  case SPU::ORr32_i128:
  case SPU::ORf32_i128:
  case SPU::ORr16_i128:
  case SPU::ORr8_i128:
  case SPU::ORvec_i128:
*/
/*
  case SPU::ORr16_r32:
  case SPU::ORr8_r32:
  case SPU::ORr32_r16:
  case SPU::ORr32_r8:
  case SPU::ORr16_r64:
  case SPU::ORr8_r64:
  case SPU::ORr64_r16:
  case SPU::ORr64_r8:
*/
  case SPU::ORr64_r32:
  case SPU::ORr32_r64:
  case SPU::ORf32_r32:
  case SPU::ORr32_f32:
  case SPU::ORf64_r64:
  case SPU::ORr64_f64: {
    assert(MI.getNumOperands() == 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "invalid SPU OR<type>_<vec> or LR instruction!");
    // NOTE(review): this only reports a move when dest and source are the
    // *same* register (and then returns that register for both) — an LR with
    // distinct registers is not reported.  Confirm this conservatism is
    // intentional before changing it.
    if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
      sourceReg = MI.getOperand(0).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  }
  // Three-operand OR: "dst = a | a" is a move of a into dst.
  case SPU::ORv16i8:
  case SPU::ORv8i16:
  case SPU::ORv4i32:
  case SPU::ORv2i64:
  case SPU::ORr8:
  case SPU::ORr16:
  case SPU::ORr32:
  case SPU::ORr64:
  case SPU::ORf32:
  case SPU::ORf64:
    assert(MI.getNumOperands() == 3 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           MI.getOperand(2).isReg() &&
           "invalid SPU OR(vec|r32|r64|gprc) instruction!");
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  }

  return false;
}
205
206unsigned
207SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
208                                  int &FrameIndex) const {
209  switch (MI->getOpcode()) {
210  default: break;
211  case SPU::LQDv16i8:
212  case SPU::LQDv8i16:
213  case SPU::LQDv4i32:
214  case SPU::LQDv4f32:
215  case SPU::LQDv2f64:
216  case SPU::LQDr128:
217  case SPU::LQDr64:
218  case SPU::LQDr32:
219  case SPU::LQDr16: {
220    const MachineOperand MOp1 = MI->getOperand(1);
221    const MachineOperand MOp2 = MI->getOperand(2);
222    if (MOp1.isImm() && MOp2.isFI()) {
223      FrameIndex = MOp2.getIndex();
224      return MI->getOperand(0).getReg();
225    }
226    break;
227  }
228  }
229  return 0;
230}
231
232unsigned
233SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
234                                 int &FrameIndex) const {
235  switch (MI->getOpcode()) {
236  default: break;
237  case SPU::STQDv16i8:
238  case SPU::STQDv8i16:
239  case SPU::STQDv4i32:
240  case SPU::STQDv4f32:
241  case SPU::STQDv2f64:
242  case SPU::STQDr128:
243  case SPU::STQDr64:
244  case SPU::STQDr32:
245  case SPU::STQDr16:
246  case SPU::STQDr8: {
247    const MachineOperand MOp1 = MI->getOperand(1);
248    const MachineOperand MOp2 = MI->getOperand(2);
249    if (MOp1.isImm() && MOp2.isFI()) {
250      FrameIndex = MOp2.getIndex();
251      return MI->getOperand(0).getReg();
252    }
253    break;
254  }
255  }
256  return 0;
257}
258
259bool SPUInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
260                                   MachineBasicBlock::iterator MI,
261                                   unsigned DestReg, unsigned SrcReg,
262                                   const TargetRegisterClass *DestRC,
263                                   const TargetRegisterClass *SrcRC) const
264{
265  // We support cross register class moves for our aliases, such as R3 in any
266  // reg class to any other reg class containing R3.  This is required because
267  // we instruction select bitconvert i64 -> f64 as a noop for example, so our
268  // types have no specific meaning.
269
270  if (DestRC == SPU::R8CRegisterClass) {
271    BuildMI(MBB, MI, get(SPU::LRr8), DestReg).addReg(SrcReg);
272  } else if (DestRC == SPU::R16CRegisterClass) {
273    BuildMI(MBB, MI, get(SPU::LRr16), DestReg).addReg(SrcReg);
274  } else if (DestRC == SPU::R32CRegisterClass) {
275    BuildMI(MBB, MI, get(SPU::LRr32), DestReg).addReg(SrcReg);
276  } else if (DestRC == SPU::R32FPRegisterClass) {
277    BuildMI(MBB, MI, get(SPU::LRf32), DestReg).addReg(SrcReg);
278  } else if (DestRC == SPU::R64CRegisterClass) {
279    BuildMI(MBB, MI, get(SPU::LRr64), DestReg).addReg(SrcReg);
280  } else if (DestRC == SPU::R64FPRegisterClass) {
281    BuildMI(MBB, MI, get(SPU::LRf64), DestReg).addReg(SrcReg);
282  } else if (DestRC == SPU::GPRCRegisterClass) {
283    BuildMI(MBB, MI, get(SPU::LRr128), DestReg).addReg(SrcReg);
284  } else if (DestRC == SPU::VECREGRegisterClass) {
285    BuildMI(MBB, MI, get(SPU::LRv16i8), DestReg).addReg(SrcReg);
286  } else {
287    // Attempt to copy unknown/unsupported register class!
288    return false;
289  }
290
291  return true;
292}
293
294void
295SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
296                                     MachineBasicBlock::iterator MI,
297                                     unsigned SrcReg, bool isKill, int FrameIdx,
298                                     const TargetRegisterClass *RC) const
299{
300  unsigned opc;
301  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
302  if (RC == SPU::GPRCRegisterClass) {
303    opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
304  } else if (RC == SPU::R64CRegisterClass) {
305    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
306  } else if (RC == SPU::R64FPRegisterClass) {
307    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
308  } else if (RC == SPU::R32CRegisterClass) {
309    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
310  } else if (RC == SPU::R32FPRegisterClass) {
311    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
312  } else if (RC == SPU::R16CRegisterClass) {
313    opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
314  } else if (RC == SPU::R8CRegisterClass) {
315    opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
316  } else if (RC == SPU::VECREGRegisterClass) {
317    opc = (isValidFrameIdx) ? SPU::STQDv16i8 : SPU::STQXv16i8;
318  } else {
319    assert(0 && "Unknown regclass!");
320    abort();
321  }
322
323  addFrameReference(BuildMI(MBB, MI, get(opc))
324                    .addReg(SrcReg, false, false, isKill), FrameIdx);
325}
326
/// storeRegToAddr - NOT IMPLEMENTED: prints a diagnostic and aborts.
/// Everything after the abort() is dead code — a skeleton for a future
/// implementation, with the corresponding PPC opcodes left in comments as
/// placeholders (this file was modeled on the PowerPC backend).
void SPUInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                     bool isKill,
                                     SmallVectorImpl<MachineOperand> &Addr,
                                     const TargetRegisterClass *RC,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  cerr << "storeRegToAddr() invoked!\n";
  abort();

  // --- dead code below: never executed ---
  if (Addr[0].isFI()) {
    /* do what storeRegToStackSlot does here */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::GPRCRegisterClass) {
      /* Opc = PPC::STW; */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::STD; */
    } else if (RC == SPU::R32CRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::STFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::STVX; */
    } else {
      assert(0 && "Unknown regclass!");
      abort();
    }
    // Opc is still 0 here; harmless only because this path is unreachable.
    MachineInstrBuilder MIB = BuildMI(MF, get(Opc))
      .addReg(SrcReg, false, false, isKill);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
      MachineOperand &MO = Addr[i];
      if (MO.isReg())
        MIB.addReg(MO.getReg());
      else if (MO.isImm())
        MIB.addImm(MO.getImm());
      else
        MIB.addFrameIndex(MO.getIndex());
    }
    NewMIs.push_back(MIB);
  }
}
369
370void
371SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
372                                        MachineBasicBlock::iterator MI,
373                                        unsigned DestReg, int FrameIdx,
374                                        const TargetRegisterClass *RC) const
375{
376  unsigned opc;
377  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
378  if (RC == SPU::GPRCRegisterClass) {
379    opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
380  } else if (RC == SPU::R64CRegisterClass) {
381    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
382  } else if (RC == SPU::R64FPRegisterClass) {
383    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
384  } else if (RC == SPU::R32CRegisterClass) {
385    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
386  } else if (RC == SPU::R32FPRegisterClass) {
387    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
388  } else if (RC == SPU::R16CRegisterClass) {
389    opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
390  } else if (RC == SPU::R8CRegisterClass) {
391    opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
392  } else if (RC == SPU::VECREGRegisterClass) {
393    opc = (isValidFrameIdx) ? SPU::LQDv16i8 : SPU::LQXv16i8;
394  } else {
395    assert(0 && "Unknown regclass in loadRegFromStackSlot!");
396    abort();
397  }
398
399  addFrameReference(BuildMI(MBB, MI, get(opc)).addReg(DestReg), FrameIdx);
400}
401
/*!
  \note We are really pessimistic here about what kind of a load we're doing.
 */
/// loadRegFromAddr - NOT IMPLEMENTED: prints a diagnostic and aborts.
/// Everything after the abort() is dead code — a skeleton for a future
/// implementation, with PPC opcodes left in comments as placeholders.
void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs)
    const {
  // NOTE(review): message says "loadRegToAddr" although the function is
  // loadRegFromAddr — harmless diagnostic text, but inconsistent.
  cerr << "loadRegToAddr() invoked!\n";
  abort();

  // --- dead code below: never executed ---
  if (Addr[0].isFI()) {
    /* do what loadRegFromStackSlot does here... */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::R8CRegisterClass) {
      /* do brilliance here */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::LWZ; */
    } else if (RC == SPU::R32CRegisterClass) {
      /* Opc = PPC::LD; */
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::LFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::LFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::LVX; */
    } else if (RC == SPU::GPRCRegisterClass) {
      /* Opc = something else! */
    } else {
      assert(0 && "Unknown regclass!");
      abort();
    }
    // Opc is still 0 here; harmless only because this path is unreachable.
    MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
      MachineOperand &MO = Addr[i];
      if (MO.isReg())
        MIB.addReg(MO.getReg());
      else if (MO.isImm())
        MIB.addImm(MO.getImm());
      else
        MIB.addFrameIndex(MO.getIndex());
    }
    NewMIs.push_back(MIB);
  }
}
448
449//! Return true if the specified load or store can be folded
450bool
451SPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
452                                   const SmallVectorImpl<unsigned> &Ops) const {
453  if (Ops.size() != 1) return false;
454
455  // Make sure this is a reg-reg copy.
456  unsigned Opc = MI->getOpcode();
457
458  switch (Opc) {
459  case SPU::ORv16i8:
460  case SPU::ORv8i16:
461  case SPU::ORv4i32:
462  case SPU::ORv2i64:
463  case SPU::ORr8:
464  case SPU::ORr16:
465  case SPU::ORr32:
466  case SPU::ORr64:
467  case SPU::ORf32:
468  case SPU::ORf64:
469    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg())
470      return true;
471    break;
472  }
473
474  return false;
475}
476
477/// foldMemoryOperand - SPU, like PPC, can only fold spills into
478/// copy instructions, turning them into load/store instructions.
479MachineInstr *
480SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
481                                    MachineInstr *MI,
482                                    const SmallVectorImpl<unsigned> &Ops,
483                                    int FrameIndex) const
484{
485  if (Ops.size() != 1) return 0;
486
487  unsigned OpNum = Ops[0];
488  unsigned Opc = MI->getOpcode();
489  MachineInstr *NewMI = 0;
490
491  switch (Opc) {
492  case SPU::ORv16i8:
493  case SPU::ORv8i16:
494  case SPU::ORv4i32:
495  case SPU::ORv2i64:
496  case SPU::ORr8:
497  case SPU::ORr16:
498  case SPU::ORr32:
499  case SPU::ORr64:
500  case SPU::ORf32:
501  case SPU::ORf64:
502    if (OpNum == 0) {  // move -> store
503      unsigned InReg = MI->getOperand(1).getReg();
504      bool isKill = MI->getOperand(1).isKill();
505      if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
506        MachineInstrBuilder MIB = BuildMI(MF, get(SPU::STQDr32));
507
508        MIB.addReg(InReg, false, false, isKill);
509        NewMI = addFrameReference(MIB, FrameIndex);
510      }
511    } else {           // move -> load
512      unsigned OutReg = MI->getOperand(0).getReg();
513      bool isDead = MI->getOperand(0).isDead();
514      MachineInstrBuilder MIB = BuildMI(MF, get(Opc));
515
516      MIB.addReg(OutReg, true, false, false, isDead);
517      Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
518        ? SPU::STQDr32 : SPU::STQXr32;
519      NewMI = addFrameReference(MIB, FrameIndex);
520    break;
521  }
522  }
523
524  return NewMI;
525}
526
//! Branch analysis
/*!
  \note This code was kiped from PPC. There may be more branch analysis for
  CellSPU than what's currently done here.
 */
/// AnalyzeBranch - Examine the last one or two terminators of MBB.
/// Returns false (success) when the block's control flow is understood:
///   - no terminators: plain fall-through (TBB/FBB untouched);
///   - single uncond branch: TBB set;
///   - single cond branch: TBB set, Cond = { branch opcode imm, cond reg };
///   - cond branch + uncond branch: TBB, Cond, and FBB set;
///   - two uncond branches: dead second branch erased, TBB set.
/// Returns true when the terminator sequence cannot be analyzed.
bool
SPUInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                            MachineBasicBlock *&FBB,
                            SmallVectorImpl<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranch(LastInst)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(LastInst)) {
      // Block ends with fall-through condbranch.
      // Cond encodes the branch as { opcode immediate, condition register }.
      TBB = LastInst->getOperand(1).getMBB();
      DEBUG(cerr << "Pushing LastInst:               ");
      DEBUG(LastInst->dump());
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    }
    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a conditional and unconditional branch, handle it.
  if (isCondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB =  SecondLastInst->getOperand(1).getMBB();
    DEBUG(cerr << "Pushing SecondLastInst:         ");
    DEBUG(SecondLastInst->dump());
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
593
594unsigned
595SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
596  MachineBasicBlock::iterator I = MBB.end();
597  if (I == MBB.begin())
598    return 0;
599  --I;
600  if (!isCondBranch(I) && !isUncondBranch(I))
601    return 0;
602
603  // Remove the first branch.
604  DEBUG(cerr << "Removing branch:                ");
605  DEBUG(I->dump());
606  I->eraseFromParent();
607  I = MBB.end();
608  if (I == MBB.begin())
609    return 1;
610
611  --I;
612  if (!(isCondBranch(I) || isUncondBranch(I)))
613    return 1;
614
615  // Remove the second branch.
616  DEBUG(cerr << "Removing second branch:         ");
617  DEBUG(I->dump());
618  I->eraseFromParent();
619  return 2;
620}
621
622unsigned
623SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
624			   MachineBasicBlock *FBB,
625			   const SmallVectorImpl<MachineOperand> &Cond) const {
626  // Shouldn't be a fall through.
627  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
628  assert((Cond.size() == 2 || Cond.size() == 0) &&
629         "SPU branch conditions have two components!");
630
631  // One-way branch.
632  if (FBB == 0) {
633    if (Cond.empty()) {
634      // Unconditional branch
635      MachineInstrBuilder MIB = BuildMI(&MBB, get(SPU::BR));
636      MIB.addMBB(TBB);
637
638      DEBUG(cerr << "Inserted one-way uncond branch: ");
639      DEBUG((*MIB).dump());
640    } else {
641      // Conditional branch
642      MachineInstrBuilder  MIB = BuildMI(&MBB, get(Cond[0].getImm()));
643      MIB.addReg(Cond[1].getReg()).addMBB(TBB);
644
645      DEBUG(cerr << "Inserted one-way cond branch:   ");
646      DEBUG((*MIB).dump());
647    }
648    return 1;
649  } else {
650    MachineInstrBuilder MIB = BuildMI(&MBB, get(Cond[0].getImm()));
651    MachineInstrBuilder MIB2 = BuildMI(&MBB, get(SPU::BR));
652
653    // Two-way Conditional Branch.
654    MIB.addReg(Cond[1].getReg()).addMBB(TBB);
655    MIB2.addMBB(FBB);
656
657    DEBUG(cerr << "Inserted conditional branch:    ");
658    DEBUG((*MIB).dump());
659    DEBUG(cerr << "part 2: ");
660    DEBUG((*MIB2).dump());
661   return 2;
662  }
663}
664
665bool
666SPUInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
667  return (!MBB.empty() && isUncondBranch(&MBB.back()));
668}
669//! Reverses a branch's condition, returning false on success.
670bool
671SPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
672  const {
673  // Pretty brainless way of inverting the condition, but it works, considering
674  // there are only two conditions...
675  static struct {
676    unsigned Opc;               //! The incoming opcode
677    unsigned RevCondOpc;        //! The reversed condition opcode
678  } revconds[] = {
679    { SPU::BRNZr32, SPU::BRZr32 },
680    { SPU::BRNZv4i32, SPU::BRZv4i32 },
681    { SPU::BRZr32, SPU::BRNZr32 },
682    { SPU::BRZv4i32, SPU::BRNZv4i32 },
683    { SPU::BRHNZr16, SPU::BRHZr16 },
684    { SPU::BRHNZv8i16, SPU::BRHZv8i16 },
685    { SPU::BRHZr16, SPU::BRHNZr16 },
686    { SPU::BRHZv8i16, SPU::BRHNZv8i16 }
687  };
688
689  unsigned Opc = unsigned(Cond[0].getImm());
690  // Pretty dull mapping between the two conditions that SPU can generate:
691  for (int i = sizeof(revconds)/sizeof(revconds[0]) - 1; i >= 0; --i) {
692    if (revconds[i].Opc == Opc) {
693      Cond[0].setImm(revconds[i].RevCondOpc);
694      return false;
695    }
696  }
697
698  return true;
699}
700