// X86InstrInfo.cpp revision 85dce6cf781b0c75de0aa178e3ad0df128b3b977
1//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file contains the X86 implementation of the TargetInstrInfo class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "X86InstrInfo.h" 15#include "X86.h" 16#include "X86GenInstrInfo.inc" 17#include "X86InstrBuilder.h" 18#include "X86Subtarget.h" 19#include "X86TargetMachine.h" 20#include "llvm/CodeGen/MachineInstrBuilder.h" 21#include "llvm/CodeGen/LiveVariables.h" 22using namespace llvm; 23 24X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) 25 : TargetInstrInfo(X86Insts, sizeof(X86Insts)/sizeof(X86Insts[0])), 26 TM(tm), RI(tm, *this) { 27} 28 29bool X86InstrInfo::isMoveInstr(const MachineInstr& MI, 30 unsigned& sourceReg, 31 unsigned& destReg) const { 32 MachineOpCode oc = MI.getOpcode(); 33 if (oc == X86::MOV8rr || oc == X86::MOV16rr || 34 oc == X86::MOV32rr || oc == X86::MOV64rr || 35 oc == X86::MOV16to16_ || oc == X86::MOV32to32_ || 36 oc == X86::MOV_Fp3232 || oc == X86::MOVSSrr || oc == X86::MOVSDrr || 37 oc == X86::MOV_Fp3264 || oc == X86::MOV_Fp6432 || oc == X86::MOV_Fp6464 || 38 oc == X86::FsMOVAPSrr || oc == X86::FsMOVAPDrr || 39 oc == X86::MOVAPSrr || oc == X86::MOVAPDrr || 40 oc == X86::MOVSS2PSrr || oc == X86::MOVSD2PDrr || 41 oc == X86::MOVPS2SSrr || oc == X86::MOVPD2SDrr || 42 oc == X86::MMX_MOVD64rr || oc == X86::MMX_MOVQ64rr) { 43 assert(MI.getNumOperands() >= 2 && 44 MI.getOperand(0).isRegister() && 45 MI.getOperand(1).isRegister() && 46 "invalid register-register move instruction"); 47 sourceReg = MI.getOperand(1).getReg(); 48 destReg = MI.getOperand(0).getReg(); 49 return true; 50 } 51 return false; 52} 53 
/// isLoadFromStackSlot - If MI is a direct load from a stack slot, set
/// FrameIndex to the slot's index and return the destination register.
/// Returns 0 (no register) otherwise.
unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Operands 1..4 form the X86 address (base, scale, index, displacement).
    // Only a bare frame-index access — FI base, scale 1, no index register,
    // zero displacement — qualifies as a stack-slot load.
    if (MI->getOperand(1).isFrameIndex() && MI->getOperand(2).isImmediate() &&
        MI->getOperand(3).isRegister() && MI->getOperand(4).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();  // Destination register.
    }
    break;
  }
  return 0;
}

/// isStoreToStackSlot - If MI is a direct store to a stack slot, set
/// FrameIndex to the slot's index and return the register being stored.
/// Returns 0 (no register) otherwise.
unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    // For stores the address comes first: operands 0..3 are base, scale,
    // index, displacement; operand 4 is the value register being stored.
    if (MI->getOperand(0).isFrameIndex() && MI->getOperand(1).isImmediate() &&
        MI->getOperand(2).isRegister() && MI->getOperand(3).isImmediate() &&
        MI->getOperand(1).getImmedValue() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(0).getFrameIndex();
      return MI->getOperand(4).getReg();  // Stored register.
    }
    break;
  }
  return 0;
}


/// isReallyTriviallyReMaterializable - Decide whether a load marked
/// M_REMATERIALIZABLE can actually be rematerialized: for the load opcodes
/// below, only a load through a constant-pool address (no base register,
/// scale 1, no index register, constant-pool displacement) qualifies.
bool X86InstrInfo::isReallyTriviallyReMaterializable(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools are trivially rematerializable.
    return MI->getOperand(1).isRegister() && MI->getOperand(2).isImmediate() &&
           MI->getOperand(3).isRegister() &&
           MI->getOperand(4).isConstantPoolIndex() &&
           MI->getOperand(1).getReg() == 0 &&
           MI->getOperand(2).getImmedValue() == 1 &&
           MI->getOperand(3).getReg() == 0;
  }
  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand.  This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  MachineInstr *MI = MBBI;
  // All instructions input are two-addr instructions.  Get the known operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's.  When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  switch (MI->getOpcode()) {
  default: return 0;
  case X86::SHUFPSrri: {
    // shufps with identical inputs is a shuffle of one register; that is
    // expressible as pshufd (SSE2), which is not tied to its source.
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    unsigned M = MI->getOperand(3).getImm();
    if (B != C) return 0;
    NewMI = BuildMI(get(X86::PSHUFDri), A).addReg(B).addImm(M);
    break;
  }
  case X86::SHL64ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    // LEA's scale field encodes 1 << ShAmt, so only shifts of 1..3
    // (scale 2, 4, 8) can be converted.
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    // LEA address operands: base reg, scale, index reg, displacement.
    NewMI = BuildMI(get(X86::LEA64r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    // In 64-bit mode a 32-bit LEA result needs the LEA64_32r form.
    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(get(Opc), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    if (DisableLEA16) return 0;

    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(get(X86::LEA16r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  }

  // FIXME: None of these instructions are promotable to LEAs without
  // additional information.  In particular, LEA doesn't set the flags that
  // add and inc do. :(
  // This switch is deliberately compiled out (if (0)) until that is resolved.
  if (0)
  switch (MI->getOpcode()) {
  case X86::INC32r:
  case X86::INC64_32r:
    assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src, 1);
    break;
  case X86::INC16r:
  case X86::INC64_16r:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, 1);
    break;
  case X86::DEC32r:
  case X86::DEC64_32r:
    assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src, -1);
    break;
  case X86::DEC16r:
  case X86::DEC64_16r:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, -1);
    break;
  case X86::ADD32rr:
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    NewMI = addRegReg(BuildMI(get(X86::LEA32r), Dest), Src,
                      MI->getOperand(2).getReg());
    break;
  case X86::ADD16rr:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    NewMI = addRegReg(BuildMI(get(X86::LEA16r), Dest), Src,
                      MI->getOperand(2).getReg());
    break;
  case X86::ADD32ri:
  case X86::ADD32ri8:
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImmediate())
      NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src,
                           MI->getOperand(2).getImmedValue());
    break;
  case X86::ADD16ri:
  case X86::ADD16ri8:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImmediate())
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src,
                           MI->getOperand(2).getImmedValue());
    break;
  case X86::SHL16ri:
    if (DisableLEA16) return 0;
    // FALL THROUGH: shared shift-to-LEA handling below.
  case X86::SHL32ri:
    assert(MI->getNumOperands() == 3 && MI->getOperand(2).isImmediate() &&
           "Unknown shl instruction!");
    unsigned ShAmt = MI->getOperand(2).getImmedValue();
    if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
      X86AddressMode AM;
      AM.Scale = 1 << ShAmt;
      AM.IndexReg = Src;
      unsigned Opc = MI->getOpcode() == X86::SHL32ri ? X86::LEA32r : X86::LEA16r;
      NewMI = addFullAddress(BuildMI(get(Opc), Dest), AM);
    }
    break;
  }

  if (NewMI) {
    NewMI->copyKillDeadInfo(MI);
    LV.instructionChanged(MI, NewMI);  // Update live variables
    MFI->insert(MBBI, NewMI);          // Insert the new inst
  }
  return NewMI;
}

/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  // FIXME: Can commute cmoves by changing the condition!
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8:{// A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
    // Double-shifts commute by swapping the opcode direction, swapping the
    // two source registers, and complementing the shift count against the
    // operand width (Size - Amt).
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImmedValue();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    // Preserve kill flags on the swapped source operands.
    bool BisKill = MI->getOperand(1).isKill();
    bool CisKill = MI->getOperand(2).isKill();
    return BuildMI(get(Opc), A).addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
  default:
    return TargetInstrInfo::commuteInstruction(MI);
  }
}

/// GetCondFromBranchOpc - Map a conditional-branch opcode to its
/// X86::CondCode, or COND_INVALID if BrOpc is not a conditional branch.
static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE:  return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL:  return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG:  return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB:  return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA:  return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS:  return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP:  return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO:  return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

/// GetCondBranchFromCond - Map an X86::CondCode back to the corresponding
/// conditional-branch opcode (the inverse of GetCondFromBranchOpc).
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L:  return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G:  return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B:  return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A:  return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S:  return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P:  return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O:  return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L:  return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G:  return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B:  return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A:  return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S:  return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P:  return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O:  return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

/// isUnpredicatedTerminator - Return true if MI is a terminator that is not
/// guarded by a predicate: unconditional (barrier) terminators, conditional
/// branches, non-predicable terminators, and predicable terminators whose
/// predicate is not set.
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  if (TID->Flags & M_TERMINATOR_FLAG) {
    // Conditional branch is a special case.
    if ((TID->Flags & M_BRANCH_FLAG) != 0 && (TID->Flags & M_BARRIER_FLAG) == 0)
      return true;
    if ((TID->Flags & M_PREDICABLE) == 0)
      return true;
    return !isPredicated(MI);
  }
  return false;
}

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
                                               const X86InstrInfo &TII) {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  return TII.isUnpredicatedTerminator(MI);
}

/// AnalyzeBranch - Analyze the terminators of MBB.  On success (return
/// false), TBB/FBB/Cond describe the block's control flow: fall-through
/// (nothing set), unconditional branch (TBB only), conditional branch with
/// fall-through (TBB + one Cond entry), or conditional + unconditional
/// (TBB, Cond, and FBB).  Returns true if the branch structure cannot be
/// understood (e.g. indirect branches or three terminators).
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this)) {
    if (!isBranch(LastInst->getOpcode()))
      return true;

    // If the block ends with a branch there are 3 possibilities:
    // it's an unconditional, conditional, or indirect branch.

    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMachineBasicBlock();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isBrAnalysisUnpredicatedTerminator(--I, *this))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMachineBasicBlock();
    return false;
  }

  // If the block ends with two X86::JMPs, handle it.  The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == X86::JMP &&
      LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

/// RemoveBranch - Erase the branch instructions at the end of MBB (at most an
/// unconditional JMP preceded by a conditional branch) and return how many
/// were removed (0, 1, or 2).
unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  // The last instruction must be JMP or a conditional branch to be removable.
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  // A second removable branch can only be a conditional branch.
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

/// InsertBranch - Insert branch code at the end of MBB: an unconditional JMP,
/// a single conditional branch, or a conditional branch to TBB followed by a
/// JMP to FBB.  Returns the number of instructions inserted.
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const std::vector<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (FBB == 0) { // One way branch.
    if (Cond.empty()) {
      // Unconditional branch?
      BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, get(Opc)).addMBB(TBB);
    }
    return 1;
  }

  // Two-way Conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, get(Opc)).addMBB(TBB);
  BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
  return 2;
}

/// BlockHasNoFallThrough - Return true if MBB's last instruction
/// unconditionally leaves the block (return, tail call, unconditional or
/// indirect jump), so control never falls through to the next block.
bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP32m:  // Indirect branch through mem.
    return true;
  default: return false;
  }
}

/// ReverseBranchCondition - Invert the single condition component in place.
/// Always succeeds (returns false) for X86.
bool X86InstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
  return false;
}

/// getPointerRegClass - Return the register class used for pointers:
/// GR64 in 64-bit mode, GR32 otherwise.
const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}