// FastISel.cpp revision 638c6830c6d0d6871065d2b00178ee4aa7d4d044
1///===-- FastISel.cpp - Implementation of the FastISel class --------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file contains the implementation of the FastISel class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "llvm/Instructions.h" 15#include "llvm/CodeGen/FastISel.h" 16#include "llvm/CodeGen/MachineInstrBuilder.h" 17#include "llvm/CodeGen/MachineRegisterInfo.h" 18#include "llvm/Target/TargetData.h" 19#include "llvm/Target/TargetInstrInfo.h" 20#include "llvm/Target/TargetLowering.h" 21#include "llvm/Target/TargetMachine.h" 22using namespace llvm; 23 24unsigned FastISel::getRegForValue(Value *V) { 25 // Look up the value to see if we already have a register for it. We 26 // cache values defined by Instructions across blocks, and other values 27 // only locally. This is because Instructions already have the SSA 28 // def-dominatess-use requirement enforced. 29 if (ValueMap.count(V)) 30 return ValueMap[V]; 31 unsigned Reg = LocalValueMap[V]; 32 if (Reg != 0) 33 return Reg; 34 35 MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT(); 36 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 37 if (CI->getValue().getActiveBits() > 64) 38 return 0; 39 // Don't cache constant materializations. To do so would require 40 // tracking what uses they dominate. 
41 Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue()); 42 } else if (isa<ConstantPointerNull>(V)) { 43 Reg = FastEmit_i(VT, VT, ISD::Constant, 0); 44 } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) { 45 Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF); 46 47 if (!Reg) { 48 const APFloat &Flt = CF->getValueAPF(); 49 MVT IntVT = TLI.getPointerTy(); 50 51 uint64_t x[2]; 52 uint32_t IntBitWidth = IntVT.getSizeInBits(); 53 if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true, 54 APFloat::rmTowardZero) != APFloat::opOK) 55 return 0; 56 APInt IntVal(IntBitWidth, 2, x); 57 58 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(), 59 ISD::Constant, IntVal.getZExtValue()); 60 if (IntegerReg == 0) 61 return 0; 62 Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg); 63 if (Reg == 0) 64 return 0; 65 } 66 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { 67 if (!SelectOperator(CE, CE->getOpcode())) return 0; 68 Reg = LocalValueMap[CE]; 69 } else if (isa<UndefValue>(V)) { 70 Reg = createResultReg(TLI.getRegClassFor(VT)); 71 BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg); 72 } else { 73 return 0; 74 } 75 76 LocalValueMap[V] = Reg; 77 return Reg; 78} 79 80/// UpdateValueMap - Update the value map to include the new mapping for this 81/// instruction, or insert an extra copy to get the result in a previous 82/// determined register. 83/// NOTE: This is only necessary because we might select a block that uses 84/// a value before we select the block that defines the value. It might be 85/// possible to fix this by selecting blocks in reverse postorder. 
void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
  // Non-instruction values (constants, arguments, etc.) are only cached
  // locally, within the current block.
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }
  if (!ValueMap.count(I))
    ValueMap[I] = Reg;
  else
    // A register was already assigned to this instruction (it was referenced
    // from a previously selected block); copy the new result into it.
    TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
                     Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND and OR (but not XOR) because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR))
      VT = TLI.getTypeToTransformTo(VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Neither immediate form worked; put the second operand in a register too.
  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

/// SelectGetElementPtr - Select and emit code for a getelementptr, lowering
/// it to explicit pointer arithmetic (adds and multiplies) on intptr-sized
/// values.
bool FastISel::SelectGetElementPtr(User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  const Type *Ty = I->getOperand(0)->getType();
  MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
  for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      // Struct indices are always constant ints.
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
          TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getABITypeSize(Ty);
      unsigned IdxN = getRegForValue(Idx);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
      if (IdxVT.bitsLT(VT))
        IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
      else if (IdxVT.bitsGT(VT))
        IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

/// SelectCast - Select and emit code for a cast whose LLVM opcode maps
/// directly to the given ISD opcode (e.g. zext -> ZERO_EXTEND).
bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

/// SelectBitCast - Select and emit code for a bitcast, either by reusing the
/// operand's register, inserting a reg-reg copy, or emitting a BIT_CONVERT.
bool FastISel::SelectBitCast(User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    ResultReg = createResultReg(DstClass);

    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         Op0, DstClass, SrcClass);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

/// SelectInstruction - Select the given instruction by dispatching on its
/// opcode. Returns false if the instruction could not be handled.
bool
FastISel::SelectInstruction(Instruction *I) {
  return SelectOperator(I, I->getOpcode());
}

/// SelectOperator - Dispatch on the LLVM opcode and select code for the
/// given User (an Instruction or ConstantExpr). Returns false for any
/// opcode fast-isel does not handle.
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add: {
    // Integer and FP adds use distinct ISD opcodes; pick by result type.
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
    return SelectBinaryOp(I, Opc);
  }
  case Instruction::Sub: {
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
    return SelectBinaryOp(I, Opc);
  }
  case Instruction::Mul: {
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ?
      ISD::FMUL : ISD::MUL;
    return SelectBinaryOp(I, Opc);
  }
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      MachineFunction::iterator NextMBB =
        next(MachineFunction::iterator(MBB));
      BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];

      if (NextMBB != MF.end() && MSucc == NextMBB) {
        // The unconditional fall-through case, which needs no instructions.
      } else {
        // The unconditional branch case.
        TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
      }
      // Either way, record the CFG edge.
      MBB->addSuccessor(MSucc);
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::PHI:
    // PHI nodes are already emitted.
    return true;

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    // These are no-ops when the sizes match; otherwise lower them as a
    // zero-extension or truncation.
    MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    MVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(MachineFunction &mf,
                   DenseMap<const Value *, unsigned> &vm,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
  : MBB(0),
    ValueMap(vm),
    MBBMap(bm),
    MF(mf),
    MRI(MF.getRegInfo()),
    TM(MF.getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()) {
}

FastISel::~FastISel() {}

// The FastEmit_* hooks below are default implementations; they return 0 to
// indicate that the operation is not supported. Targets override the forms
// they can handle.

unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
                             ISD::NodeType) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
                                ISD::NodeType,
                                unsigned /*Op0*/, unsigned /*Op1*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
                                unsigned Op0, uint64_t Imm,
                                MVT::SimpleValueType ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg != 0)
    return ResultReg;
  // The rr form needs the immediate in a register of its own.
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
                                unsigned Op0, ConstantFP *FPImm,
                                MVT::SimpleValueType ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    MVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    // Give up unless the conversion to integer completes without error.
    if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                             APFloat::rmTowardZero) != APFloat::opOK)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    // Convert the materialized integer back to floating point.
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

/// createResultReg - Allocate a fresh virtual register of the given class
/// to hold an instruction's result.
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

/// FastEmitInst_ - Emit a machine instruction with no operands, returning
/// its result register.
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, II, ResultReg);
  return ResultReg;
}

/// FastEmitInst_r - Emit a machine instruction with one register operand,
/// returning its result register.
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, II, ResultReg).addReg(Op0);
  return ResultReg;
}

/// FastEmitInst_rr - Emit a machine instruction with two register operands,
/// returning its result register.
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, unsigned Op1) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
  return ResultReg;
}

/// FastEmitInst_ri - Emit a machine instruction with a register operand and
/// an immediate operand, returning its result register.
unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
  return ResultReg;
}

/// FastEmitInst_rf - Emit a machine instruction with a register operand and
/// a floating-point immediate operand, returning its result register.
unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
  return ResultReg;
}

/// FastEmitInst_rri - Emit a machine instruction with two register operands
/// and an immediate operand, returning its result register.
unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
  return ResultReg;
}

/// FastEmitInst_i - Emit a machine instruction with a single immediate
/// operand, returning its result register.
unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, II, ResultReg).addImm(Imm);
  return ResultReg;
}

/// FastEmitInst_extractsubreg - Emit an EXTRACT_SUBREG that pulls the
/// subregister Idx out of Op0, returning the result register.
unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
  const TargetRegisterClass* RC = MRI.getRegClass(Op0);
  // NOTE(review): the -1 suggests Idx is a 1-based subregister index into
  // Op0's register class's subregister class list — confirm against callers.
  const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);

  unsigned ResultReg = createResultReg(SRC);
  const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);

  BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
  return ResultReg;
}