FastISel.cpp revision 821164875706dd28e48c6cc3cea5c8ffa6e658d1
//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Instructions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

/// getRegForValue - Return the virtual register assigned to V, materializing
/// V into a register first if necessary. Returns 0 if V's type is illegal
/// (other than i1, which is promoted) or if V cannot be handled here.
unsigned FastISel::getRegForValue(Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT();

  // Ignore illegal types.
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
    else
      return 0;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() > 64)
      return TargetMaterializeConstant(CI);
    // Don't cache constant materializations. To do so would require
    // tracking what uses they dominate.
    Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<GlobalValue>(V)) {
    return TargetMaterializeConstant(cast<Constant>(V));
  } else if (isa<AllocaInst>(V)) {
    return TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // A null pointer is just a zero-valued constant of pointer type.
    Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
  } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // The target couldn't emit the FP immediate directly. Try converting
      // it to an integer and emitting SINT_TO_FP on that; this only applies
      // when the conversion round-trips exactly (APFloat::opOK).
      const APFloat &Flt = CF->getValueAPF();
      MVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                               APFloat::rmTowardZero) != APFloat::opOK)
        return TargetMaterializeConstant(CF);
      APInt IntVal(IntBitWidth, 2, x);

      unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                       ISD::Constant, IntVal.getZExtValue());
      if (IntegerReg == 0)
        return TargetMaterializeConstant(CF);
      Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
      if (Reg == 0)
        return TargetMaterializeConstant(CF);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Select the constant expression as if it were an instruction; its
    // result register is recorded in LocalValueMap by UpdateValueMap.
    if (!SelectOperator(CE, CE->getOpcode())) return 0;
    Reg = LocalValueMap[CE];
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
  } else {
    return 0;
  }

  if (!Reg && isa<Constant>(V))
    return TargetMaterializeConstant(cast<Constant>(V));

  LocalValueMap[V] = Reg;
  return Reg;
}

/// lookUpRegForValue - Like getRegForValue, but only consults the maps;
/// never materializes a value. Returns 0 if no register is recorded for V.
unsigned FastISel::lookUpRegForValue(Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
  // Non-instruction values are only cached locally.
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }
  if (!ValueMap.count(I))
    ValueMap[I] = Reg;
  else
    // A register was already assigned (by a forward use); copy into it.
    TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
                     Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND and OR (but not XOR) because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR))
      VT = TLI.getTypeToTransformTo(VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

/// SelectGetElementPtr - Select and emit code for a getelementptr, lowering
/// it to explicit pointer arithmetic: struct fields become constant-offset
/// adds, array subscripts become (possibly scaled) index adds.
bool FastISel::SelectGetElementPtr(User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  const Type *Ty = I->getOperand(0)->getType();
  MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
  for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
          TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getABITypeSize(Ty);
      unsigned IdxN = getRegForValue(Idx);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
      if (IdxVT.bitsLT(VT))
        IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
      else if (IdxVT.bitsGT(VT))
        IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

/// SelectCast - Select and emit code for a cast instruction whose LLVM
/// opcode corresponds directly to the given ISD cast opcode. Both source
/// and destination types must be simple and legal.
bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

/// SelectBitCast - Select and emit code for a bitcast: a no-op when the type
/// is unchanged, otherwise a reg-reg copy or a BIT_CONVERT.
bool FastISel::SelectBitCast(User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    ResultReg = createResultReg(DstClass);

    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         Op0, DstClass, SrcClass);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

/// SelectInstruction - Select the given instruction by dispatching on its
/// LLVM opcode. Returns false if the instruction could not be handled.
bool
FastISel::SelectInstruction(Instruction *I) {
  return SelectOperator(I, I->getOpcode());
}

/// SelectOperator - Select the operator (an Instruction or ConstantExpr)
/// identified by the given LLVM opcode. Returns false when the opcode, type,
/// or operands can't be handled, signaling the caller to fall back to the
/// full SelectionDAG path.
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add: {
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
    return SelectBinaryOp(I, Opc);
  }
  case Instruction::Sub: {
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
    return SelectBinaryOp(I, Opc);
  }
  case Instruction::Mul: {
    ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
    return SelectBinaryOp(I, Opc);
  }
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      MachineFunction::iterator NextMBB =
         next(MachineFunction::iterator(MBB));
      BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];

      if (NextMBB != MF.end() && MSucc == NextMBB) {
        // The unconditional fall-through case, which needs no instructions.
      } else {
        // The unconditional branch case.
        TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
      }
      MBB->addSuccessor(MSucc);
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::PHI:
    // PHI nodes are already emitted.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    // Pointer-integer conversions are plain zext/trunc/no-op on the bits.
    MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    MVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(MachineFunction &mf,
                   DenseMap<const Value *, unsigned> &vm,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                   DenseMap<const AllocaInst *, int> &am)
  : MBB(0),
    ValueMap(vm),
    MBBMap(bm),
    StaticAllocaMap(am),
    MF(mf),
    MRI(MF.getRegInfo()),
    MFI(*MF.getFrameInfo()),
    MCP(*MF.getConstantPool()),
    TM(MF.getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()) {
}

FastISel::~FastISel() {}

// The default FastEmit_* implementations below all return 0, meaning
// "unhandled". Targets override the forms they can select directly.

unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
                             ISD::NodeType) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
                              ISD::NodeType, ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
                               ISD::NodeType, unsigned /*Op0*/,
                               ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
                                ISD::NodeType,
                                unsigned /*Op0*/, unsigned /*Op1*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
                                unsigned Op0, uint64_t Imm,
                                MVT::SimpleValueType ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
                                unsigned Op0, ConstantFP *FPImm,
                                MVT::SimpleValueType ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    MVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                             APFloat::rmTowardZero) != APFloat::opOK)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

/// createResultReg - Create a fresh virtual register of the given class to
/// hold an instruction result.
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

/// FastEmitInst_ - Emit a machine instruction with no operands and return
/// the register holding its result.
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, II, ResultReg);
  return ResultReg;
}

/// FastEmitInst_r - Emit a machine instruction with one register operand.
/// If the instruction has no explicit defs, the result is copied out of its
/// first implicit def; returns 0 if that copy can't be inserted.
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, II, ResultReg).addReg(Op0);
  else {
    BuildMI(MBB, II).addReg(Op0);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

/// FastEmitInst_rr - Emit a machine instruction with two register operands.
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, unsigned Op1) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
  else {
    BuildMI(MBB,
II).addReg(Op0).addReg(Op1);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitInst_ri - Emit a machine instruction with one register operand
/// and one immediate operand.
unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
  else {
    BuildMI(MBB, II).addReg(Op0).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitInst_rf - Emit a machine instruction with one register operand
/// and one floating-point immediate operand.
unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
  else {
    BuildMI(MBB, II).addReg(Op0).addFPImm(FPImm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitInst_rri - Emit a machine instruction with two register operands
/// and one immediate operand.
unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
  else {
    BuildMI(MBB, II).addReg(Op0).addReg(Op1).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitInst_i - Emit a machine instruction with a single immediate
/// operand.
unsigned
FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                         const TargetRegisterClass *RC,
                         uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, II, ResultReg).addImm(Imm);
  else {
    BuildMI(MBB, II).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitInst_extractsubreg - Emit an EXTRACT_SUBREG of subregister Idx
/// of Op0. The Idx-1 when choosing the subregister class suggests indices
/// here are 1-based — NOTE(review): confirm against the register-class
/// subregclasses list semantics.
unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
  const TargetRegisterClass* RC = MRI.getRegClass(Op0);
  const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);

  unsigned ResultReg = createResultReg(SRC);
  const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
  else {
    BuildMI(MBB, II).addReg(Op0).addImm(Idx);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}