// SPUInstrInfo.cpp revision 2320a44b903fa0fc3f05cf91f89dc0254923a736
1//===- SPUInstrInfo.cpp - Cell SPU Instruction Information ----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the Cell SPU implementation of the TargetInstrInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPURegisterNames.h"
15#include "SPUInstrInfo.h"
16#include "SPUInstrBuilder.h"
17#include "SPUTargetMachine.h"
18#include "SPUGenInstrInfo.inc"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/Support/Debug.h"
21#include "llvm/Support/ErrorHandling.h"
22#include "llvm/Support/raw_ostream.h"
23
24using namespace llvm;
25
26namespace {
27  //! Predicate for an unconditional branch instruction
28  inline bool isUncondBranch(const MachineInstr *I) {
29    unsigned opc = I->getOpcode();
30
31    return (opc == SPU::BR
32            || opc == SPU::BRA
33            || opc == SPU::BI);
34  }
35
36  //! Predicate for a conditional branch instruction
37  inline bool isCondBranch(const MachineInstr *I) {
38    unsigned opc = I->getOpcode();
39
40    return (opc == SPU::BRNZr32
41            || opc == SPU::BRNZv4i32
42            || opc == SPU::BRZr32
43            || opc == SPU::BRZv4i32
44            || opc == SPU::BRHNZr16
45            || opc == SPU::BRHNZv8i16
46            || opc == SPU::BRHZr16
47            || opc == SPU::BRHZv8i16);
48  }
49}
50
//! Construct the SPU instruction information object.
/*!
  Hands the TableGen-generated instruction table (SPUInsts, pulled in from
  SPUGenInstrInfo.inc) to the TargetInstrInfoImpl base class, then
  initializes the register info, which needs both the subtarget and this
  instruction info instance.
 */
SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
  : TargetInstrInfoImpl(SPUInsts, sizeof(SPUInsts)/sizeof(SPUInsts[0])),
    TM(tm),
    RI(*TM.getSubtargetImpl(), *this)
{ /* NOP */ }
56
57bool
58SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
59                          unsigned& sourceReg,
60                          unsigned& destReg,
61                          unsigned& SrcSR, unsigned& DstSR) const {
62  SrcSR = DstSR = 0;  // No sub-registers.
63
64  switch (MI.getOpcode()) {
65  default:
66    break;
67  case SPU::ORIv4i32:
68  case SPU::ORIr32:
69  case SPU::ORHIv8i16:
70  case SPU::ORHIr16:
71  case SPU::ORHIi8i16:
72  case SPU::ORBIv16i8:
73  case SPU::ORBIr8:
74  case SPU::ORIi16i32:
75  case SPU::ORIi8i32:
76  case SPU::AHIvec:
77  case SPU::AHIr16:
78  case SPU::AIv4i32:
79    assert(MI.getNumOperands() == 3 &&
80           MI.getOperand(0).isReg() &&
81           MI.getOperand(1).isReg() &&
82           MI.getOperand(2).isImm() &&
83           "invalid SPU ORI/ORHI/ORBI/AHI/AI/SFI/SFHI instruction!");
84    if (MI.getOperand(2).getImm() == 0) {
85      sourceReg = MI.getOperand(1).getReg();
86      destReg = MI.getOperand(0).getReg();
87      return true;
88    }
89    break;
90  case SPU::AIr32:
91    assert(MI.getNumOperands() == 3 &&
92           "wrong number of operands to AIr32");
93    if (MI.getOperand(0).isReg() &&
94        MI.getOperand(1).isReg() &&
95        (MI.getOperand(2).isImm() &&
96         MI.getOperand(2).getImm() == 0)) {
97      sourceReg = MI.getOperand(1).getReg();
98      destReg = MI.getOperand(0).getReg();
99      return true;
100    }
101    break;
102  case SPU::LRr8:
103  case SPU::LRr16:
104  case SPU::LRr32:
105  case SPU::LRf32:
106  case SPU::LRr64:
107  case SPU::LRf64:
108  case SPU::LRr128:
109  case SPU::LRv16i8:
110  case SPU::LRv8i16:
111  case SPU::LRv4i32:
112  case SPU::LRv4f32:
113  case SPU::LRv2i64:
114  case SPU::LRv2f64:
115  case SPU::ORv16i8_i8:
116  case SPU::ORv8i16_i16:
117  case SPU::ORv4i32_i32:
118  case SPU::ORv2i64_i64:
119  case SPU::ORv4f32_f32:
120  case SPU::ORv2f64_f64:
121  case SPU::ORi8_v16i8:
122  case SPU::ORi16_v8i16:
123  case SPU::ORi32_v4i32:
124  case SPU::ORi64_v2i64:
125  case SPU::ORf32_v4f32:
126  case SPU::ORf64_v2f64:
127/*
128  case SPU::ORi128_r64:
129  case SPU::ORi128_f64:
130  case SPU::ORi128_r32:
131  case SPU::ORi128_f32:
132  case SPU::ORi128_r16:
133  case SPU::ORi128_r8:
134*/
135  case SPU::ORi128_vec:
136/*
137  case SPU::ORr64_i128:
138  case SPU::ORf64_i128:
139  case SPU::ORr32_i128:
140  case SPU::ORf32_i128:
141  case SPU::ORr16_i128:
142  case SPU::ORr8_i128:
143*/
144  case SPU::ORvec_i128:
145/*
146  case SPU::ORr16_r32:
147  case SPU::ORr8_r32:
148  case SPU::ORf32_r32:
149  case SPU::ORr32_f32:
150  case SPU::ORr32_r16:
151  case SPU::ORr32_r8:
152  case SPU::ORr16_r64:
153  case SPU::ORr8_r64:
154  case SPU::ORr64_r16:
155  case SPU::ORr64_r8:
156*/
157  case SPU::ORr64_r32:
158  case SPU::ORr32_r64:
159  case SPU::ORf32_r32:
160  case SPU::ORr32_f32:
161  case SPU::ORf64_r64:
162  case SPU::ORr64_f64: {
163    assert(MI.getNumOperands() == 2 &&
164           MI.getOperand(0).isReg() &&
165           MI.getOperand(1).isReg() &&
166           "invalid SPU OR<type>_<vec> or LR instruction!");
167    if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
168      sourceReg = MI.getOperand(1).getReg();
169      destReg = MI.getOperand(0).getReg();
170      return true;
171    }
172    break;
173  }
174  case SPU::ORv16i8:
175  case SPU::ORv8i16:
176  case SPU::ORv4i32:
177  case SPU::ORv2i64:
178  case SPU::ORr8:
179  case SPU::ORr16:
180  case SPU::ORr32:
181  case SPU::ORr64:
182  case SPU::ORr128:
183  case SPU::ORf32:
184  case SPU::ORf64:
185    assert(MI.getNumOperands() == 3 &&
186           MI.getOperand(0).isReg() &&
187           MI.getOperand(1).isReg() &&
188           MI.getOperand(2).isReg() &&
189           "invalid SPU OR(vec|r32|r64|gprc) instruction!");
190    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
191      sourceReg = MI.getOperand(1).getReg();
192      destReg = MI.getOperand(0).getReg();
193      return true;
194    }
195    break;
196  }
197
198  return false;
199}
200
201unsigned
202SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
203                                  int &FrameIndex) const {
204  switch (MI->getOpcode()) {
205  default: break;
206  case SPU::LQDv16i8:
207  case SPU::LQDv8i16:
208  case SPU::LQDv4i32:
209  case SPU::LQDv4f32:
210  case SPU::LQDv2f64:
211  case SPU::LQDr128:
212  case SPU::LQDr64:
213  case SPU::LQDr32:
214  case SPU::LQDr16: {
215    const MachineOperand MOp1 = MI->getOperand(1);
216    const MachineOperand MOp2 = MI->getOperand(2);
217    if (MOp1.isImm() && MOp2.isFI()) {
218      FrameIndex = MOp2.getIndex();
219      return MI->getOperand(0).getReg();
220    }
221    break;
222  }
223  }
224  return 0;
225}
226
227unsigned
228SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
229                                 int &FrameIndex) const {
230  switch (MI->getOpcode()) {
231  default: break;
232  case SPU::STQDv16i8:
233  case SPU::STQDv8i16:
234  case SPU::STQDv4i32:
235  case SPU::STQDv4f32:
236  case SPU::STQDv2f64:
237  case SPU::STQDr128:
238  case SPU::STQDr64:
239  case SPU::STQDr32:
240  case SPU::STQDr16:
241  case SPU::STQDr8: {
242    const MachineOperand MOp1 = MI->getOperand(1);
243    const MachineOperand MOp2 = MI->getOperand(2);
244    if (MOp1.isImm() && MOp2.isFI()) {
245      FrameIndex = MOp2.getIndex();
246      return MI->getOperand(0).getReg();
247    }
248    break;
249  }
250  }
251  return 0;
252}
253
254bool SPUInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
255                                   MachineBasicBlock::iterator MI,
256                                   unsigned DestReg, unsigned SrcReg,
257                                   const TargetRegisterClass *DestRC,
258                                   const TargetRegisterClass *SrcRC,
259                                   DebugLoc DL) const
260{
261  // We support cross register class moves for our aliases, such as R3 in any
262  // reg class to any other reg class containing R3.  This is required because
263  // we instruction select bitconvert i64 -> f64 as a noop for example, so our
264  // types have no specific meaning.
265
266  if (DestRC == SPU::R8CRegisterClass) {
267    BuildMI(MBB, MI, DL, get(SPU::LRr8), DestReg).addReg(SrcReg);
268  } else if (DestRC == SPU::R16CRegisterClass) {
269    BuildMI(MBB, MI, DL, get(SPU::LRr16), DestReg).addReg(SrcReg);
270  } else if (DestRC == SPU::R32CRegisterClass) {
271    BuildMI(MBB, MI, DL, get(SPU::LRr32), DestReg).addReg(SrcReg);
272  } else if (DestRC == SPU::R32FPRegisterClass) {
273    BuildMI(MBB, MI, DL, get(SPU::LRf32), DestReg).addReg(SrcReg);
274  } else if (DestRC == SPU::R64CRegisterClass) {
275    BuildMI(MBB, MI, DL, get(SPU::LRr64), DestReg).addReg(SrcReg);
276  } else if (DestRC == SPU::R64FPRegisterClass) {
277    BuildMI(MBB, MI, DL, get(SPU::LRf64), DestReg).addReg(SrcReg);
278  } else if (DestRC == SPU::GPRCRegisterClass) {
279    BuildMI(MBB, MI, DL, get(SPU::LRr128), DestReg).addReg(SrcReg);
280  } else if (DestRC == SPU::VECREGRegisterClass) {
281    BuildMI(MBB, MI, DL, get(SPU::LRv16i8), DestReg).addReg(SrcReg);
282  } else {
283    // Attempt to copy unknown/unsupported register class!
284    return false;
285  }
286
287  return true;
288}
289
290void
291SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
292                                  MachineBasicBlock::iterator MI,
293                                  unsigned SrcReg, bool isKill, int FrameIdx,
294                                  const TargetRegisterClass *RC,
295                                  const TargetRegisterInfo *TRI) const
296{
297  unsigned opc;
298  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
299  if (RC == SPU::GPRCRegisterClass) {
300    opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
301  } else if (RC == SPU::R64CRegisterClass) {
302    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
303  } else if (RC == SPU::R64FPRegisterClass) {
304    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
305  } else if (RC == SPU::R32CRegisterClass) {
306    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
307  } else if (RC == SPU::R32FPRegisterClass) {
308    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
309  } else if (RC == SPU::R16CRegisterClass) {
310    opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
311  } else if (RC == SPU::R8CRegisterClass) {
312    opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
313  } else if (RC == SPU::VECREGRegisterClass) {
314    opc = (isValidFrameIdx) ? SPU::STQDv16i8 : SPU::STQXv16i8;
315  } else {
316    llvm_unreachable("Unknown regclass!");
317  }
318
319  DebugLoc DL;
320  if (MI != MBB.end()) DL = MI->getDebugLoc();
321  addFrameReference(BuildMI(MBB, MI, DL, get(opc))
322                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
323}
324
325void
326SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
327                                   MachineBasicBlock::iterator MI,
328                                   unsigned DestReg, int FrameIdx,
329                                   const TargetRegisterClass *RC,
330                                   const TargetRegisterInfo *TRI) const
331{
332  unsigned opc;
333  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
334  if (RC == SPU::GPRCRegisterClass) {
335    opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
336  } else if (RC == SPU::R64CRegisterClass) {
337    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
338  } else if (RC == SPU::R64FPRegisterClass) {
339    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
340  } else if (RC == SPU::R32CRegisterClass) {
341    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
342  } else if (RC == SPU::R32FPRegisterClass) {
343    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
344  } else if (RC == SPU::R16CRegisterClass) {
345    opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
346  } else if (RC == SPU::R8CRegisterClass) {
347    opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
348  } else if (RC == SPU::VECREGRegisterClass) {
349    opc = (isValidFrameIdx) ? SPU::LQDv16i8 : SPU::LQXv16i8;
350  } else {
351    llvm_unreachable("Unknown regclass in loadRegFromStackSlot!");
352  }
353
354  DebugLoc DL;
355  if (MI != MBB.end()) DL = MI->getDebugLoc();
356  addFrameReference(BuildMI(MBB, MI, DL, get(opc), DestReg), FrameIdx);
357}
358
359//! Return true if the specified load or store can be folded
360bool
361SPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
362                                   const SmallVectorImpl<unsigned> &Ops) const {
363  if (Ops.size() != 1) return false;
364
365  // Make sure this is a reg-reg copy.
366  unsigned Opc = MI->getOpcode();
367
368  switch (Opc) {
369  case SPU::ORv16i8:
370  case SPU::ORv8i16:
371  case SPU::ORv4i32:
372  case SPU::ORv2i64:
373  case SPU::ORr8:
374  case SPU::ORr16:
375  case SPU::ORr32:
376  case SPU::ORr64:
377  case SPU::ORf32:
378  case SPU::ORf64:
379    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg())
380      return true;
381    break;
382  }
383
384  return false;
385}
386
387/// foldMemoryOperand - SPU, like PPC, can only fold spills into
388/// copy instructions, turning them into load/store instructions.
389MachineInstr *
390SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
391                                    MachineInstr *MI,
392                                    const SmallVectorImpl<unsigned> &Ops,
393                                    int FrameIndex) const
394{
395  if (Ops.size() != 1) return 0;
396
397  unsigned OpNum = Ops[0];
398  unsigned Opc = MI->getOpcode();
399  MachineInstr *NewMI = 0;
400
401  switch (Opc) {
402  case SPU::ORv16i8:
403  case SPU::ORv8i16:
404  case SPU::ORv4i32:
405  case SPU::ORv2i64:
406  case SPU::ORr8:
407  case SPU::ORr16:
408  case SPU::ORr32:
409  case SPU::ORr64:
410  case SPU::ORf32:
411  case SPU::ORf64:
412    if (OpNum == 0) {  // move -> store
413      unsigned InReg = MI->getOperand(1).getReg();
414      bool isKill = MI->getOperand(1).isKill();
415      bool isUndef = MI->getOperand(1).isUndef();
416      if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
417        MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(),
418                                          get(SPU::STQDr32));
419
420        MIB.addReg(InReg, getKillRegState(isKill) | getUndefRegState(isUndef));
421        NewMI = addFrameReference(MIB, FrameIndex);
422      }
423    } else {           // move -> load
424      unsigned OutReg = MI->getOperand(0).getReg();
425      bool isDead = MI->getOperand(0).isDead();
426      bool isUndef = MI->getOperand(0).isUndef();
427      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc));
428
429      MIB.addReg(OutReg, RegState::Define | getDeadRegState(isDead) |
430                 getUndefRegState(isUndef));
431      Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
432        ? SPU::STQDr32 : SPU::STQXr32;
433      NewMI = addFrameReference(MIB, FrameIndex);
434    break;
435  }
436  }
437
438  return NewMI;
439}
440
//! Branch analysis
/*!
  \note This code was kiped from PPC. There may be more branch analysis for
  CellSPU than what's currently done here.

  Standard AnalyzeBranch contract: returns false when the terminators are
  understood (filling in TBB/FBB/Cond), true when they are not.  Handles:
  a lone unconditional branch, a lone conditional branch (fall-through
  false edge), a conditional + unconditional pair, and two unconditional
  branches (the second is dead and erased when AllowModify is set).
 */
bool
SPUInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                            MachineBasicBlock *&FBB,
                            SmallVectorImpl<MachineOperand> &Cond,
                            bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  // Skip trailing debug values; they are not real terminators.
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranch(LastInst)) {
      // Check for jump tables
      if (!LastInst->getOperand(0).isMBB())
        return true;
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(LastInst)) {
      // Block ends with fall-through condbranch.
      // Cond encodes the branch as [opcode-imm, condition-register].
      TBB = LastInst->getOperand(1).getMBB();
      DEBUG(errs() << "Pushing LastInst:               ");
      DEBUG(LastInst->dump());
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    }
    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a conditional and unconditional branch, handle it.
  if (isCondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB =  SecondLastInst->getOperand(1).getMBB();
    DEBUG(errs() << "Pushing SecondLastInst:         ");
    DEBUG(SecondLastInst->dump());
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed, so remove it.
  if (isUncondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
520
521unsigned
522SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
523  MachineBasicBlock::iterator I = MBB.end();
524  if (I == MBB.begin())
525    return 0;
526  --I;
527  while (I->isDebugValue()) {
528    if (I == MBB.begin())
529      return 0;
530    --I;
531  }
532  if (!isCondBranch(I) && !isUncondBranch(I))
533    return 0;
534
535  // Remove the first branch.
536  DEBUG(errs() << "Removing branch:                ");
537  DEBUG(I->dump());
538  I->eraseFromParent();
539  I = MBB.end();
540  if (I == MBB.begin())
541    return 1;
542
543  --I;
544  if (!(isCondBranch(I) || isUncondBranch(I)))
545    return 1;
546
547  // Remove the second branch.
548  DEBUG(errs() << "Removing second branch:         ");
549  DEBUG(I->dump());
550  I->eraseFromParent();
551  return 2;
552}
553
554unsigned
555SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
556                           MachineBasicBlock *FBB,
557                           const SmallVectorImpl<MachineOperand> &Cond) const {
558  // FIXME this should probably have a DebugLoc argument
559  DebugLoc dl;
560  // Shouldn't be a fall through.
561  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
562  assert((Cond.size() == 2 || Cond.size() == 0) &&
563         "SPU branch conditions have two components!");
564
565  // One-way branch.
566  if (FBB == 0) {
567    if (Cond.empty()) {
568      // Unconditional branch
569      MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(SPU::BR));
570      MIB.addMBB(TBB);
571
572      DEBUG(errs() << "Inserted one-way uncond branch: ");
573      DEBUG((*MIB).dump());
574    } else {
575      // Conditional branch
576      MachineInstrBuilder  MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
577      MIB.addReg(Cond[1].getReg()).addMBB(TBB);
578
579      DEBUG(errs() << "Inserted one-way cond branch:   ");
580      DEBUG((*MIB).dump());
581    }
582    return 1;
583  } else {
584    MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
585    MachineInstrBuilder MIB2 = BuildMI(&MBB, dl, get(SPU::BR));
586
587    // Two-way Conditional Branch.
588    MIB.addReg(Cond[1].getReg()).addMBB(TBB);
589    MIB2.addMBB(FBB);
590
591    DEBUG(errs() << "Inserted conditional branch:    ");
592    DEBUG((*MIB).dump());
593    DEBUG(errs() << "part 2: ");
594    DEBUG((*MIB2).dump());
595   return 2;
596  }
597}
598
599//! Reverses a branch's condition, returning false on success.
600bool
601SPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
602  const {
603  // Pretty brainless way of inverting the condition, but it works, considering
604  // there are only two conditions...
605  static struct {
606    unsigned Opc;               //! The incoming opcode
607    unsigned RevCondOpc;        //! The reversed condition opcode
608  } revconds[] = {
609    { SPU::BRNZr32, SPU::BRZr32 },
610    { SPU::BRNZv4i32, SPU::BRZv4i32 },
611    { SPU::BRZr32, SPU::BRNZr32 },
612    { SPU::BRZv4i32, SPU::BRNZv4i32 },
613    { SPU::BRHNZr16, SPU::BRHZr16 },
614    { SPU::BRHNZv8i16, SPU::BRHZv8i16 },
615    { SPU::BRHZr16, SPU::BRHNZr16 },
616    { SPU::BRHZv8i16, SPU::BRHNZv8i16 }
617  };
618
619  unsigned Opc = unsigned(Cond[0].getImm());
620  // Pretty dull mapping between the two conditions that SPU can generate:
621  for (int i = sizeof(revconds)/sizeof(revconds[0]) - 1; i >= 0; --i) {
622    if (revconds[i].Opc == Opc) {
623      Cond[0].setImm(revconds[i].RevCondOpc);
624      return false;
625    }
626  }
627
628  return true;
629}
630