SPUInstrInfo.cpp revision 7ea02ffe918baff29a39981276e83b0e845ede03
1//===- SPUInstrInfo.cpp - Cell SPU Instruction Information ----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the Cell SPU implementation of the TargetInstrInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPURegisterNames.h"
15#include "SPUInstrInfo.h"
16#include "SPUInstrBuilder.h"
17#include "SPUTargetMachine.h"
18#include "SPUGenInstrInfo.inc"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/Support/Streams.h"
21#include "llvm/Support/Debug.h"
22
23using namespace llvm;
24
25namespace {
26  //! Predicate for an unconditional branch instruction
27  inline bool isUncondBranch(const MachineInstr *I) {
28    unsigned opc = I->getOpcode();
29
30    return (opc == SPU::BR
31            || opc == SPU::BRA
32            || opc == SPU::BI);
33  }
34
35  //! Predicate for a conditional branch instruction
36  inline bool isCondBranch(const MachineInstr *I) {
37    unsigned opc = I->getOpcode();
38
39    return (opc == SPU::BRNZr32
40            || opc == SPU::BRNZv4i32
41            || opc == SPU::BRZr32
42            || opc == SPU::BRZv4i32
43            || opc == SPU::BRHNZr16
44            || opc == SPU::BRHNZv8i16
45            || opc == SPU::BRHZr16
46            || opc == SPU::BRHZv8i16);
47  }
48}
49
// Construct the SPU instruction info, handing the tablegen'd instruction
// descriptor table (SPUInsts, from SPUGenInstrInfo.inc) to the base class
// and initializing the register info with this target's subtarget.
SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
  : TargetInstrInfoImpl(SPUInsts, sizeof(SPUInsts)/sizeof(SPUInsts[0])),
    TM(tm),
    RI(*TM.getSubtargetImpl(), *this)
{ /* NOP */ }
55
56bool
57SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
58                          unsigned& sourceReg,
59                          unsigned& destReg,
60                          unsigned& SrcSR, unsigned& DstSR) const {
61  SrcSR = DstSR = 0;  // No sub-registers.
62
63  switch (MI.getOpcode()) {
64  default:
65    break;
66  case SPU::ORIv4i32:
67  case SPU::ORIr32:
68  case SPU::ORHIv8i16:
69  case SPU::ORHIr16:
70  case SPU::ORHIi8i16:
71  case SPU::ORBIv16i8:
72  case SPU::ORBIr8:
73  case SPU::ORIi16i32:
74  case SPU::ORIi8i32:
75  case SPU::AHIvec:
76  case SPU::AHIr16:
77  case SPU::AIv4i32:
78    assert(MI.getNumOperands() == 3 &&
79           MI.getOperand(0).isReg() &&
80           MI.getOperand(1).isReg() &&
81           MI.getOperand(2).isImm() &&
82           "invalid SPU ORI/ORHI/ORBI/AHI/AI/SFI/SFHI instruction!");
83    if (MI.getOperand(2).getImm() == 0) {
84      sourceReg = MI.getOperand(1).getReg();
85      destReg = MI.getOperand(0).getReg();
86      return true;
87    }
88    break;
89  case SPU::AIr32:
90    assert(MI.getNumOperands() == 3 &&
91           "wrong number of operands to AIr32");
92    if (MI.getOperand(0).isReg() &&
93        MI.getOperand(1).isReg() &&
94        (MI.getOperand(2).isImm() &&
95         MI.getOperand(2).getImm() == 0)) {
96      sourceReg = MI.getOperand(1).getReg();
97      destReg = MI.getOperand(0).getReg();
98      return true;
99    }
100    break;
101  case SPU::LRr8:
102  case SPU::LRr16:
103  case SPU::LRr32:
104  case SPU::LRf32:
105  case SPU::LRr64:
106  case SPU::LRf64:
107  case SPU::LRr128:
108  case SPU::LRv16i8:
109  case SPU::LRv8i16:
110  case SPU::LRv4i32:
111  case SPU::LRv4f32:
112  case SPU::LRv2i64:
113  case SPU::LRv2f64:
114  case SPU::ORv16i8_i8:
115  case SPU::ORv8i16_i16:
116  case SPU::ORv4i32_i32:
117  case SPU::ORv2i64_i64:
118  case SPU::ORv4f32_f32:
119  case SPU::ORv2f64_f64:
120  case SPU::ORi8_v16i8:
121  case SPU::ORi16_v8i16:
122  case SPU::ORi32_v4i32:
123  case SPU::ORi64_v2i64:
124  case SPU::ORf32_v4f32:
125  case SPU::ORf64_v2f64:
126/*
127  case SPU::ORi128_r64:
128  case SPU::ORi128_f64:
129  case SPU::ORi128_r32:
130  case SPU::ORi128_f32:
131  case SPU::ORi128_r16:
132  case SPU::ORi128_r8:
133*/
134  case SPU::ORi128_vec:
135/*
136  case SPU::ORr64_i128:
137  case SPU::ORf64_i128:
138  case SPU::ORr32_i128:
139  case SPU::ORf32_i128:
140  case SPU::ORr16_i128:
141  case SPU::ORr8_i128:
142*/
143  case SPU::ORvec_i128:
144/*
145  case SPU::ORr16_r32:
146  case SPU::ORr8_r32:
147  case SPU::ORf32_r32:
148  case SPU::ORr32_f32:
149  case SPU::ORr32_r16:
150  case SPU::ORr32_r8:
151  case SPU::ORr16_r64:
152  case SPU::ORr8_r64:
153  case SPU::ORr64_r16:
154  case SPU::ORr64_r8:
155*/
156  case SPU::ORr64_r32:
157  case SPU::ORr32_r64:
158  case SPU::ORf32_r32:
159  case SPU::ORr32_f32:
160  case SPU::ORf64_r64:
161  case SPU::ORr64_f64: {
162    assert(MI.getNumOperands() == 2 &&
163           MI.getOperand(0).isReg() &&
164           MI.getOperand(1).isReg() &&
165           "invalid SPU OR<type>_<vec> or LR instruction!");
166    if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
167      sourceReg = MI.getOperand(1).getReg();
168      destReg = MI.getOperand(0).getReg();
169      return true;
170    }
171    break;
172  }
173  case SPU::ORv16i8:
174  case SPU::ORv8i16:
175  case SPU::ORv4i32:
176  case SPU::ORv2i64:
177  case SPU::ORr8:
178  case SPU::ORr16:
179  case SPU::ORr32:
180  case SPU::ORr64:
181  case SPU::ORr128:
182  case SPU::ORf32:
183  case SPU::ORf64:
184    assert(MI.getNumOperands() == 3 &&
185           MI.getOperand(0).isReg() &&
186           MI.getOperand(1).isReg() &&
187           MI.getOperand(2).isReg() &&
188           "invalid SPU OR(vec|r32|r64|gprc) instruction!");
189    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
190      sourceReg = MI.getOperand(1).getReg();
191      destReg = MI.getOperand(0).getReg();
192      return true;
193    }
194    break;
195  }
196
197  return false;
198}
199
200unsigned
201SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
202                                  int &FrameIndex) const {
203  switch (MI->getOpcode()) {
204  default: break;
205  case SPU::LQDv16i8:
206  case SPU::LQDv8i16:
207  case SPU::LQDv4i32:
208  case SPU::LQDv4f32:
209  case SPU::LQDv2f64:
210  case SPU::LQDr128:
211  case SPU::LQDr64:
212  case SPU::LQDr32:
213  case SPU::LQDr16: {
214    const MachineOperand MOp1 = MI->getOperand(1);
215    const MachineOperand MOp2 = MI->getOperand(2);
216    if (MOp1.isImm() && MOp2.isFI()) {
217      FrameIndex = MOp2.getIndex();
218      return MI->getOperand(0).getReg();
219    }
220    break;
221  }
222  }
223  return 0;
224}
225
226unsigned
227SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
228                                 int &FrameIndex) const {
229  switch (MI->getOpcode()) {
230  default: break;
231  case SPU::STQDv16i8:
232  case SPU::STQDv8i16:
233  case SPU::STQDv4i32:
234  case SPU::STQDv4f32:
235  case SPU::STQDv2f64:
236  case SPU::STQDr128:
237  case SPU::STQDr64:
238  case SPU::STQDr32:
239  case SPU::STQDr16:
240  case SPU::STQDr8: {
241    const MachineOperand MOp1 = MI->getOperand(1);
242    const MachineOperand MOp2 = MI->getOperand(2);
243    if (MOp1.isImm() && MOp2.isFI()) {
244      FrameIndex = MOp2.getIndex();
245      return MI->getOperand(0).getReg();
246    }
247    break;
248  }
249  }
250  return 0;
251}
252
253bool SPUInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
254                                   MachineBasicBlock::iterator MI,
255                                   unsigned DestReg, unsigned SrcReg,
256                                   const TargetRegisterClass *DestRC,
257                                   const TargetRegisterClass *SrcRC) const
258{
259  // We support cross register class moves for our aliases, such as R3 in any
260  // reg class to any other reg class containing R3.  This is required because
261  // we instruction select bitconvert i64 -> f64 as a noop for example, so our
262  // types have no specific meaning.
263
264  DebugLoc DL = DebugLoc::getUnknownLoc();
265  if (MI != MBB.end()) DL = MI->getDebugLoc();
266
267  if (DestRC == SPU::R8CRegisterClass) {
268    BuildMI(MBB, MI, DL, get(SPU::LRr8), DestReg).addReg(SrcReg);
269  } else if (DestRC == SPU::R16CRegisterClass) {
270    BuildMI(MBB, MI, DL, get(SPU::LRr16), DestReg).addReg(SrcReg);
271  } else if (DestRC == SPU::R32CRegisterClass) {
272    BuildMI(MBB, MI, DL, get(SPU::LRr32), DestReg).addReg(SrcReg);
273  } else if (DestRC == SPU::R32FPRegisterClass) {
274    BuildMI(MBB, MI, DL, get(SPU::LRf32), DestReg).addReg(SrcReg);
275  } else if (DestRC == SPU::R64CRegisterClass) {
276    BuildMI(MBB, MI, DL, get(SPU::LRr64), DestReg).addReg(SrcReg);
277  } else if (DestRC == SPU::R64FPRegisterClass) {
278    BuildMI(MBB, MI, DL, get(SPU::LRf64), DestReg).addReg(SrcReg);
279  } else if (DestRC == SPU::GPRCRegisterClass) {
280    BuildMI(MBB, MI, DL, get(SPU::LRr128), DestReg).addReg(SrcReg);
281  } else if (DestRC == SPU::VECREGRegisterClass) {
282    BuildMI(MBB, MI, DL, get(SPU::LRv16i8), DestReg).addReg(SrcReg);
283  } else {
284    // Attempt to copy unknown/unsupported register class!
285    return false;
286  }
287
288  return true;
289}
290
291void
292SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
293                                     MachineBasicBlock::iterator MI,
294                                     unsigned SrcReg, bool isKill, int FrameIdx,
295                                     const TargetRegisterClass *RC) const
296{
297  unsigned opc;
298  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
299  if (RC == SPU::GPRCRegisterClass) {
300    opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
301  } else if (RC == SPU::R64CRegisterClass) {
302    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
303  } else if (RC == SPU::R64FPRegisterClass) {
304    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
305  } else if (RC == SPU::R32CRegisterClass) {
306    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
307  } else if (RC == SPU::R32FPRegisterClass) {
308    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
309  } else if (RC == SPU::R16CRegisterClass) {
310    opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
311  } else if (RC == SPU::R8CRegisterClass) {
312    opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
313  } else if (RC == SPU::VECREGRegisterClass) {
314    opc = (isValidFrameIdx) ? SPU::STQDv16i8 : SPU::STQXv16i8;
315  } else {
316    assert(0 && "Unknown regclass!");
317    abort();
318  }
319
320  DebugLoc DL = DebugLoc::getUnknownLoc();
321  if (MI != MBB.end()) DL = MI->getDebugLoc();
322  addFrameReference(BuildMI(MBB, MI, DL, get(opc))
323                    .addReg(SrcReg, false, false, isKill), FrameIdx);
324}
325
// Store SrcReg to an arbitrary address described by Addr.
// NOTE(review): this path is unimplemented — it prints a diagnostic and
// calls abort() unconditionally, so everything below the abort() is
// unreachable placeholder code (the commented-out opcodes are leftovers
// from the PPC backend this was modeled on, with Opc left at 0).
void SPUInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  cerr << "storeRegToAddr() invoked!\n";
  abort();

  // --- unreachable sketch of the intended implementation follows ---
  if (Addr[0].isFI()) {
    /* do what storeRegToStackSlot does here */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::GPRCRegisterClass) {
      /* Opc = PPC::STW; */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::STD; */
    } else if (RC == SPU::R32CRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::STFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::STVX; */
    } else {
      assert(0 && "Unknown regclass!");
      abort();
    }
    DebugLoc DL = DebugLoc::getUnknownLoc();
    MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc))
      .addReg(SrcReg, false, false, isKill);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i)
      MIB.addOperand(Addr[i]);
    NewMIs.push_back(MIB);
  }
}
362
363void
364SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
365                                        MachineBasicBlock::iterator MI,
366                                        unsigned DestReg, int FrameIdx,
367                                        const TargetRegisterClass *RC) const
368{
369  unsigned opc;
370  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
371  if (RC == SPU::GPRCRegisterClass) {
372    opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
373  } else if (RC == SPU::R64CRegisterClass) {
374    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
375  } else if (RC == SPU::R64FPRegisterClass) {
376    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
377  } else if (RC == SPU::R32CRegisterClass) {
378    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
379  } else if (RC == SPU::R32FPRegisterClass) {
380    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
381  } else if (RC == SPU::R16CRegisterClass) {
382    opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
383  } else if (RC == SPU::R8CRegisterClass) {
384    opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
385  } else if (RC == SPU::VECREGRegisterClass) {
386    opc = (isValidFrameIdx) ? SPU::LQDv16i8 : SPU::LQXv16i8;
387  } else {
388    assert(0 && "Unknown regclass in loadRegFromStackSlot!");
389    abort();
390  }
391
392  DebugLoc DL = DebugLoc::getUnknownLoc();
393  if (MI != MBB.end()) DL = MI->getDebugLoc();
394  addFrameReference(BuildMI(MBB, MI, DL, get(opc)).addReg(DestReg), FrameIdx);
395}
396
397/*!
398  \note We are really pessimistic here about what kind of a load we're doing.
399 */
400void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
401                                   SmallVectorImpl<MachineOperand> &Addr,
402                                   const TargetRegisterClass *RC,
403                                   SmallVectorImpl<MachineInstr*> &NewMIs)
404    const {
405  cerr << "loadRegToAddr() invoked!\n";
406  abort();
407
408  if (Addr[0].isFI()) {
409    /* do what loadRegFromStackSlot does here... */
410  } else {
411    unsigned Opc = 0;
412    if (RC == SPU::R8CRegisterClass) {
413      /* do brilliance here */
414    } else if (RC == SPU::R16CRegisterClass) {
415      /* Opc = PPC::LWZ; */
416    } else if (RC == SPU::R32CRegisterClass) {
417      /* Opc = PPC::LD; */
418    } else if (RC == SPU::R32FPRegisterClass) {
419      /* Opc = PPC::LFD; */
420    } else if (RC == SPU::R64FPRegisterClass) {
421      /* Opc = PPC::LFS; */
422    } else if (RC == SPU::VECREGRegisterClass) {
423      /* Opc = PPC::LVX; */
424    } else if (RC == SPU::GPRCRegisterClass) {
425      /* Opc = something else! */
426    } else {
427      assert(0 && "Unknown regclass!");
428      abort();
429    }
430    DebugLoc DL = DebugLoc::getUnknownLoc();
431    MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
432    for (unsigned i = 0, e = Addr.size(); i != e; ++i)
433      MIB.addOperand(Addr[i]);
434    NewMIs.push_back(MIB);
435  }
436}
437
438//! Return true if the specified load or store can be folded
439bool
440SPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
441                                   const SmallVectorImpl<unsigned> &Ops) const {
442  if (Ops.size() != 1) return false;
443
444  // Make sure this is a reg-reg copy.
445  unsigned Opc = MI->getOpcode();
446
447  switch (Opc) {
448  case SPU::ORv16i8:
449  case SPU::ORv8i16:
450  case SPU::ORv4i32:
451  case SPU::ORv2i64:
452  case SPU::ORr8:
453  case SPU::ORr16:
454  case SPU::ORr32:
455  case SPU::ORr64:
456  case SPU::ORf32:
457  case SPU::ORf64:
458    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg())
459      return true;
460    break;
461  }
462
463  return false;
464}
465
466/// foldMemoryOperand - SPU, like PPC, can only fold spills into
467/// copy instructions, turning them into load/store instructions.
468MachineInstr *
469SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
470                                    MachineInstr *MI,
471                                    const SmallVectorImpl<unsigned> &Ops,
472                                    int FrameIndex) const
473{
474  if (Ops.size() != 1) return 0;
475
476  unsigned OpNum = Ops[0];
477  unsigned Opc = MI->getOpcode();
478  MachineInstr *NewMI = 0;
479
480  switch (Opc) {
481  case SPU::ORv16i8:
482  case SPU::ORv8i16:
483  case SPU::ORv4i32:
484  case SPU::ORv2i64:
485  case SPU::ORr8:
486  case SPU::ORr16:
487  case SPU::ORr32:
488  case SPU::ORr64:
489  case SPU::ORf32:
490  case SPU::ORf64:
491    if (OpNum == 0) {  // move -> store
492      unsigned InReg = MI->getOperand(1).getReg();
493      bool isKill = MI->getOperand(1).isKill();
494      if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
495        MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(),
496                                          get(SPU::STQDr32));
497
498        MIB.addReg(InReg, false, false, isKill);
499        NewMI = addFrameReference(MIB, FrameIndex);
500      }
501    } else {           // move -> load
502      unsigned OutReg = MI->getOperand(0).getReg();
503      bool isDead = MI->getOperand(0).isDead();
504      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc));
505
506      MIB.addReg(OutReg, true, false, false, isDead);
507      Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
508        ? SPU::STQDr32 : SPU::STQXr32;
509      NewMI = addFrameReference(MIB, FrameIndex);
510    break;
511  }
512  }
513
514  return NewMI;
515}
516
//! Branch analysis
/*!
  \note This code was kiped from PPC. There may be more branch analysis for
  CellSPU than what's currently done here.

  Returns false when the block's terminators were understood (filling in
  TBB/FBB/Cond), true when they could not be analyzed.  On success with a
  conditional branch, Cond holds two entries: [0] = the branch opcode as an
  immediate, [1] = the condition register operand.
 */
bool
SPUInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                            MachineBasicBlock *&FBB,
                            SmallVectorImpl<MachineOperand> &Cond,
                            bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranch(LastInst)) {
      // Unconditional branch: operand 0 is the target block.
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(LastInst)) {
      // Block ends with fall-through condbranch.
      // Conditional branch: operand 0 is the condition register,
      // operand 1 the target block.
      TBB = LastInst->getOperand(1).getMBB();
      DEBUG(cerr << "Pushing LastInst:               ");
      DEBUG(LastInst->dump());
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    }
    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a conditional and unconditional branch, handle it.
  if (isCondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB =  SecondLastInst->getOperand(1).getMBB();
    DEBUG(cerr << "Pushing SecondLastInst:         ");
    DEBUG(SecondLastInst->dump());
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
585
586unsigned
587SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
588  MachineBasicBlock::iterator I = MBB.end();
589  if (I == MBB.begin())
590    return 0;
591  --I;
592  if (!isCondBranch(I) && !isUncondBranch(I))
593    return 0;
594
595  // Remove the first branch.
596  DEBUG(cerr << "Removing branch:                ");
597  DEBUG(I->dump());
598  I->eraseFromParent();
599  I = MBB.end();
600  if (I == MBB.begin())
601    return 1;
602
603  --I;
604  if (!(isCondBranch(I) || isUncondBranch(I)))
605    return 1;
606
607  // Remove the second branch.
608  DEBUG(cerr << "Removing second branch:         ");
609  DEBUG(I->dump());
610  I->eraseFromParent();
611  return 2;
612}
613
614unsigned
615SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
616                           MachineBasicBlock *FBB,
617                           const SmallVectorImpl<MachineOperand> &Cond) const {
618  // FIXME this should probably have a DebugLoc argument
619  DebugLoc dl = DebugLoc::getUnknownLoc();
620  // Shouldn't be a fall through.
621  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
622  assert((Cond.size() == 2 || Cond.size() == 0) &&
623         "SPU branch conditions have two components!");
624
625  // One-way branch.
626  if (FBB == 0) {
627    if (Cond.empty()) {
628      // Unconditional branch
629      MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(SPU::BR));
630      MIB.addMBB(TBB);
631
632      DEBUG(cerr << "Inserted one-way uncond branch: ");
633      DEBUG((*MIB).dump());
634    } else {
635      // Conditional branch
636      MachineInstrBuilder  MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
637      MIB.addReg(Cond[1].getReg()).addMBB(TBB);
638
639      DEBUG(cerr << "Inserted one-way cond branch:   ");
640      DEBUG((*MIB).dump());
641    }
642    return 1;
643  } else {
644    MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
645    MachineInstrBuilder MIB2 = BuildMI(&MBB, dl, get(SPU::BR));
646
647    // Two-way Conditional Branch.
648    MIB.addReg(Cond[1].getReg()).addMBB(TBB);
649    MIB2.addMBB(FBB);
650
651    DEBUG(cerr << "Inserted conditional branch:    ");
652    DEBUG((*MIB).dump());
653    DEBUG(cerr << "part 2: ");
654    DEBUG((*MIB2).dump());
655   return 2;
656  }
657}
658
659bool
660SPUInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
661  return (!MBB.empty() && isUncondBranch(&MBB.back()));
662}
663//! Reverses a branch's condition, returning false on success.
664bool
665SPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
666  const {
667  // Pretty brainless way of inverting the condition, but it works, considering
668  // there are only two conditions...
669  static struct {
670    unsigned Opc;               //! The incoming opcode
671    unsigned RevCondOpc;        //! The reversed condition opcode
672  } revconds[] = {
673    { SPU::BRNZr32, SPU::BRZr32 },
674    { SPU::BRNZv4i32, SPU::BRZv4i32 },
675    { SPU::BRZr32, SPU::BRNZr32 },
676    { SPU::BRZv4i32, SPU::BRNZv4i32 },
677    { SPU::BRHNZr16, SPU::BRHZr16 },
678    { SPU::BRHNZv8i16, SPU::BRHZv8i16 },
679    { SPU::BRHZr16, SPU::BRHNZr16 },
680    { SPU::BRHZv8i16, SPU::BRHNZv8i16 }
681  };
682
683  unsigned Opc = unsigned(Cond[0].getImm());
684  // Pretty dull mapping between the two conditions that SPU can generate:
685  for (int i = sizeof(revconds)/sizeof(revconds[0]) - 1; i >= 0; --i) {
686    if (revconds[i].Opc == Opc) {
687      Cond[0].setImm(revconds[i].RevCondOpc);
688      return false;
689    }
690  }
691
692  return true;
693}
694