X86InstrInfo.cpp revision 849f214a4e3676e41168b0c5398165c4d4fb99f8
//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/LiveVariables.h"
using namespace llvm;

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfo(X86Insts, sizeof(X86Insts)/sizeof(X86Insts[0])),
    TM(tm), RI(tm, *this) {
}

bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  MachineOpCode oc = MI.getOpcode();
  if (oc == X86::MOV8rr || oc == X86::MOV16rr ||
      oc == X86::MOV32rr || oc == X86::MOV64rr ||
      oc == X86::MOV16to16_ || oc == X86::MOV32to32_ ||
      oc == X86::FpMOV3232 || oc == X86::MOVSSrr || oc == X86::MOVSDrr ||
      oc == X86::FpMOV3264 || oc == X86::FpMOV6432 || oc == X86::FpMOV6464 ||
      oc == X86::FsMOVAPSrr || oc == X86::FsMOVAPDrr ||
      oc == X86::MOVAPSrr || oc == X86::MOVAPDrr ||
      oc == X86::MOVSS2PSrr || oc == X86::MOVSD2PDrr ||
      oc == X86::MOVPS2SSrr || oc == X86::MOVPD2SDrr ||
      oc == X86::MMX_MOVD64rr || oc == X86::MMX_MOVQ64rr) {
    assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "invalid register-register move instruction");
    sourceReg = MI.getOperand(1).getReg();
    destReg = MI.getOperand(0).getReg();
    return true;
  }
  return false;
}

unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::FpLD64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
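    // The memory reference in these loads is operands 1-4: base, scale,
    // index register, and displacement.  Only treat it as a stack-slot reload
    // when it is a plain [FrameIndex] access: scale of 1, no index register,
    // and zero displacement.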
    if (MI->getOperand(1).isFrameIndex() && MI->getOperand(2).isImmediate() &&
        MI->getOperand(3).isRegister() && MI->getOperand(4).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::FpSTP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    if (MI->getOperand(0).isFrameIndex() && MI->getOperand(1).isImmediate() &&
        MI->getOperand(2).isRegister() && MI->getOperand(3).isImmediate() &&
        MI->getOperand(1).getImmedValue() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(0).getFrameIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}

bool X86InstrInfo::isReallyTriviallyReMaterializable(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::FpLD64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools are trivially rematerializable.
    return MI->getOperand(1).isRegister() && MI->getOperand(2).isImmediate() &&
           MI->getOperand(3).isRegister() &&
           MI->getOperand(4).isConstantPoolIndex() &&
           MI->getOperand(1).getReg() == 0 &&
           MI->getOperand(2).getImmedValue() == 1 &&
           MI->getOperand(3).getReg() == 0;
  }
  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand.  This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  MachineInstr *MI = MBBI;
  // All instructions input are two-addr instructions.  Get the known operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's.  When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  switch (MI->getOpcode()) {
  default: return 0;
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    unsigned M = MI->getOperand(3).getImm();
    if (B != C) return 0;
    NewMI = BuildMI(get(X86::PSHUFDri), A).addReg(B).addImm(M);
    break;
  }
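  // The SHL cases below replace the shift with an LEA whose scale is
  // 1 << ShAmt.  An LEA scale can only be 1, 2, 4, or 8, so only shift
  // amounts of 1, 2, or 3 are converted; anything else bails out with 0.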
  case X86::SHL64ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(get(X86::LEA64r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(get(Opc), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    if (DisableLEA16) return 0;

    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(get(X86::LEA16r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  }

  // FIXME: None of these instructions are promotable to LEAs without
  // additional information.  In particular, LEA doesn't set the flags that
  // add and inc do. :(
  if (0)
  switch (MI->getOpcode()) {
  case X86::INC32r:
  case X86::INC64_32r:
    assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src, 1);
    break;
  case X86::INC16r:
  case X86::INC64_16r:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, 1);
    break;
  case X86::DEC32r:
  case X86::DEC64_32r:
    assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src, -1);
    break;
  case X86::DEC16r:
  case X86::DEC64_16r:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, -1);
    break;
  case X86::ADD32rr:
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    NewMI = addRegReg(BuildMI(get(X86::LEA32r), Dest), Src,
                      MI->getOperand(2).getReg());
    break;
  case X86::ADD16rr:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    NewMI = addRegReg(BuildMI(get(X86::LEA16r), Dest), Src,
                      MI->getOperand(2).getReg());
    break;
  case X86::ADD32ri:
  case X86::ADD32ri8:
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImmediate())
      NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src,
                           MI->getOperand(2).getImmedValue());
    break;
  case X86::ADD16ri:
  case X86::ADD16ri8:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImmediate())
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src,
                           MI->getOperand(2).getImmedValue());
    break;
  case X86::SHL16ri:
    if (DisableLEA16) return 0;
  case X86::SHL32ri:
    assert(MI->getNumOperands() == 3 && MI->getOperand(2).isImmediate() &&
           "Unknown shl instruction!");
    unsigned ShAmt = MI->getOperand(2).getImmedValue();
    if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
      X86AddressMode AM;
      AM.Scale = 1 << ShAmt;
      AM.IndexReg = Src;
      unsigned Opc = MI->getOpcode() == X86::SHL32ri ? X86::LEA32r : X86::LEA16r;
      NewMI = addFullAddress(BuildMI(get(Opc), Dest), AM);
    }
    break;
  }

  if (NewMI) {
    NewMI->copyKillDeadInfo(MI);
    LV.instructionChanged(MI, NewMI);  // Update live variables
    MFI->insert(MBBI, NewMI);          // Insert the new inst
  }
  return NewMI;
}

/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  // FIXME: Can commute cmoves by changing the condition!
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: { // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImmedValue();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    bool BisKill = MI->getOperand(1).isKill();
    bool CisKill = MI->getOperand(2).isKill();
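    // SHLD B, C, Amt and SHRD C, B, Size-Amt both compute
    // (B << Amt) | (C >> (Size - Amt)), so swapping the two register sources
    // and using Size-Amt as the immediate yields an equivalent instruction.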
    return BuildMI(get(Opc), A).addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
  default:
    return TargetInstrInfo::commuteInstruction(MI);
  }
}

static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE: return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL: return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG: return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB: return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA: return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS: return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP: return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO: return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L: return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G: return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B: return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A: return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S: return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P: return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O: return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L: return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G: return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B: return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A: return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S: return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P: return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O: return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  if (TID->Flags & M_TERMINATOR_FLAG)
    return !isPredicated(MI);
  return false;
}

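/// AnalyzeBranch - Analyze the terminators of MBB.  Returns false on success:
/// either the block falls through (nothing is set), ends in a single branch
/// (TBB and possibly Cond are set), or ends in a conditional branch followed
/// by an unconditional one (TBB, Cond, and FBB are set).  Returns true if the
/// terminator sequence cannot be understood.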
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (!isBranch(LastInst->getOpcode()))
      return true;

    // If the block ends with a branch there are 3 possibilities:
    // it's an unconditional, conditional, or indirect branch.

    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMachineBasicBlock();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMachineBasicBlock();
    return false;
  }

  // If the block ends with two X86::JMPs, handle it.  The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == X86::JMP &&
      LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

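/// RemoveBranch - Strip the branch instructions off the end of MBB and
/// return how many were removed (at most two: a trailing branch and the
/// conditional branch that may precede it).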
unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const std::vector<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (FBB == 0) { // One way branch.
    if (Cond.empty()) {
      // Unconditional branch?
      BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, get(Opc)).addMBB(TBB);
    }
    return 1;
  }

  // Two-way Conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, get(Opc)).addMBB(TBB);
  BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
  return 2;
}

bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP32m:  // Indirect branch through mem.
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
  return false;
}

const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}