X86FastISel.cpp revision e922c0201916e0b980ab3cfe91e1413e68d55647
//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

namespace {

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// StackPtr - Register used as the stack pointer.
  ///
  unsigned StackPtr;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(MachineFunction &mf,
                       MachineModuleInfo *mmi,
                       DwarfWriter *dw,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                       DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                       , SmallSet<Instruction*, 8> &cil
#endif
                       )
    : FastISel(mf, mmi, dw, vm, bm, am
#ifndef NDEBUG
               , cil
#endif
               ) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
    StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitCompare(Value *LHS, Value *RHS, MVT VT);

  bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);

  bool X86FastEmitStore(MVT VT, Value *Val,
                        const X86AddressMode &AM);
  bool X86FastEmitStore(MVT VT, unsigned Val,
                        const X86AddressMode &AM);

  bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectAddress(Value *V, X86AddressMode &AM);
  bool X86SelectCallAddress(Value *V, X86AddressMode &AM);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  bool X86SelectTrunc(Instruction *I);

  bool X86SelectFPExt(Instruction *I);
  bool X86SelectFPTrunc(Instruction *I);

  bool X86SelectExtractValue(Instruction *I);

  bool X86VisitIntrinsicCall(IntrinsicInst &I);
  bool X86SelectCall(Instruction *I);

  CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);

  const X86InstrInfo *getInstrInfo() const {
    return getTargetMachine()->getInstrInfo();
  }
  const X86TargetMachine *getTargetMachine() const {
    return static_cast<const X86TargetMachine *>(&TM);
  }

  unsigned TargetMaterializeConstant(Constant *C);

  unsigned TargetMaterializeAlloca(AllocaInst *C);

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(MVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 when SSE2 is available
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 when SSE1 is available
  }

  bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
};

} // end anonymous namespace.

bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
  VT = TLI.getValueType(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // For now, require SSE/SSE2 for performing floating-point operations,
  // since x87 requires additional work.
  if (VT == MVT::f64 && !X86ScalarSSEf64)
    return false;
  if (VT == MVT::f32 && !X86ScalarSSEf32)
    return false;
  // Similarly, no f80 support yet.
  if (VT == MVT::f80)
    return false;
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}

#include "X86GenCallingConv.inc"

/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling
/// convention.
CCAssignFn *X86FastISel::CCAssignFnForCall(unsigned CC, bool isTailCall) {
  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast)
    return CC_X86_32_FastCC;
  else
    return CC_X86_32_C;
}

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return false;
  }

  ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base pointer,
/// Ptr, and a displacement offset, or a GlobalAddress, i.e. V. Return true
/// if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
                              const X86AddressMode &AM) {
  // Get opcode and regclass of the output for the given store instruction.
  unsigned Opc = 0;
  switch (VT.getSimpleVT()) {
  case MVT::f80: // No f80 support yet.
  default: return false;
  case MVT::i8:  Opc = X86::MOV8mr;  break;
  case MVT::i16: Opc = X86::MOV16mr; break;
  case MVT::i32: Opc = X86::MOV32mr; break;
  case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
  case MVT::f32:
    Opc = Subtarget->hasSSE1() ? X86::MOVSSmr : X86::ST_Fp32m;
    break;
  case MVT::f64:
    Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
    break;
  }

  addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
  return true;
}

bool X86FastISel::X86FastEmitStore(MVT VT, Value *Val,
                                   const X86AddressMode &AM) {
  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Val))
    Val = Val->getContext().getNullValue(TD.getIntPtrType());

  // If this is a store of a simple constant, fold the constant into the store.
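  // For illustration (the IR below is assumed, not taken from this file):
  //   store i32 42, i32* %p
  // can be emitted as a single MOV32mi against the address in AM, without
  // first materializing 42 into a register.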
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    unsigned Opc = 0;
    switch (VT.getSimpleVT()) {
    default: break;
    case MVT::i8:  Opc = X86::MOV8mi;  break;
    case MVT::i16: Opc = X86::MOV16mi; break;
    case MVT::i32: Opc = X86::MOV32mi; break;
    case MVT::i64:
      // Must be a 32-bit sign extended value.
      if ((int)CI->getSExtValue() == CI->getSExtValue())
        Opc = X86::MOV64mi32;
      break;
    }

    if (Opc) {
      addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
        .addImm(CI->getSExtValue());
      return true;
    }
  }

  unsigned ValReg = getRegForValue(Val);
  if (ValReg == 0)
    return false;

  return X86FastEmitStore(VT, ValReg, AM);
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
                                    unsigned Src, MVT SrcVT,
                                    unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
  User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();
    U = I;
  } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::Alloca: {
    // Do static allocas.
    const AllocaInst *A = cast<AllocaInst>(V);
    DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
    if (SI != StaticAllocaMap.end()) {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = SI->second;
      return true;
    }
    break;
  }

  case Instruction::Add: {
    // Adds of constants are common and easy enough.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
      // They have to fit in the 32-bit signed displacement field though.
      if (isInt32(Disp)) {
        AM.Disp = (uint32_t)Disp;
        return X86SelectAddress(U->getOperand(0), AM);
      }
    }
    break;
  }

  case Instruction::GetElementPtr: {
    // Pattern-match simple GEPs.
    uint64_t Disp = (int32_t)AM.Disp;
    unsigned IndexReg = AM.IndexReg;
    unsigned Scale = AM.Scale;
    gep_type_iterator GTI = gep_type_begin(U);
    // Iterate through the indices, folding what we can. Constants can be
    // folded, and one dynamic index can be handled, if the scale is supported.
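    // A sketch of the two supported cases (the IR is assumed for
    // illustration): for "getelementptr i32* %p, i32 %i" the loop below
    // leaves %p as the base, puts %i in IndexReg, and sets Scale = 4; for a
    // constant index, the element offset is folded into Disp instead.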
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        Disp += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          // Constant-offset addressing.
          Disp += CI->getSExtValue() * S;
        } else if (IndexReg == 0 &&
                   (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
                   (S == 1 || S == 2 || S == 4 || S == 8)) {
          // Scaled-index addressing.
          Scale = S;
          IndexReg = getRegForGEPIndex(Op);
          if (IndexReg == 0)
            return false;
        } else
          // Unsupported.
          goto unsupported_gep;
      }
    }
    // Check for displacement overflow.
    if (!isInt32(Disp))
      break;
    // Ok, the GEP indices were covered by constant-offset and scaled-index
    // addressing. Update the address state and move on to examining the base.
    AM.IndexReg = IndexReg;
    AM.Scale = Scale;
    AM.Disp = (uint32_t)Disp;
    return X86SelectAddress(U->getOperand(0), AM);
  unsupported_gep:
    // Ok, the GEP indices weren't all covered.
    break;
  }
  }

  // Handle constant address.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (Subtarget->isPICStyleRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Can't handle TLS yet.
    if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal())
        return false;

    // Okay, we've committed to selecting this global. Set up the basic
    // address.
    AM.GV = GV;

    // Allow the subtarget to classify the global.
    unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);

    // If this reference is relative to the pic base, set it now.
    if (isGlobalRelativeToPICBase(GVFlags)) {
      // FIXME: How do we know Base.Reg is free??
      AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(&MF);
    }

    // Unless the ABI requires an extra load, return a direct reference to
    // the global.
    if (!isGlobalStubReference(GVFlags)) {
      if (Subtarget->isPICStyleRIPRel()) {
        // Use rip-relative addressing if we can.  Above we verified that the
        // base and index registers are unused.
        assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
        AM.Base.Reg = X86::RIP;
      }
      AM.GVOpFlags = GVFlags;
      return true;
    }

    // Ok, we need to do a load from a stub.  If we've already loaded from this
    // stub, reuse the loaded pointer, otherwise emit the load now.
    DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
    unsigned LoadReg;
    if (I != LocalValueMap.end() && I->second != 0) {
      LoadReg = I->second;
    } else {
      // Issue load from stub.
      unsigned Opc = 0;
      const TargetRegisterClass *RC = NULL;
      X86AddressMode StubAM;
      StubAM.Base.Reg = AM.Base.Reg;
      StubAM.GV = GV;
      StubAM.GVOpFlags = GVFlags;

      if (TLI.getPointerTy() == MVT::i64) {
        Opc = X86::MOV64rm;
        RC  = X86::GR64RegisterClass;

        if (Subtarget->isPICStyleRIPRel())
          StubAM.Base.Reg = X86::RIP;
      } else {
        Opc = X86::MOV32rm;
        RC  = X86::GR32RegisterClass;
      }

      LoadReg = createResultReg(RC);
      addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);

      // Prevent loading GV stub multiple times in same MBB.
      LocalValueMap[V] = LoadReg;
    }

    // Now construct the final address. Note that the Disp, Scale,
    // and Index values may already be set here.
    AM.Base.Reg = LoadReg;
    AM.GV = 0;
    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
  User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();
    U = I;
  } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectCallAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;
  }

  // Handle constant address.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (Subtarget->isPICStyleRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Can't handle TLS or DLLImport.
    if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal() || GVar->hasDLLImportLinkage())
        return false;

    // Okay, we've committed to selecting this global. Set up the basic
    // address.
    AM.GV = GV;

    // No ABI requires an extra load for anything other than DLLImport, which
    // we rejected above. Return a direct reference to the global.
    if (Subtarget->isPICStyleRIPRel()) {
      // Use rip-relative addressing if we can.  Above we verified that the
      // base and index registers are unused.
      assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
      AM.Base.Reg = X86::RIP;
    } else if (Subtarget->isPICStyleStubPIC()) {
      AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;
    } else if (Subtarget->isPICStyleGOT()) {
      AM.GVOpFlags = X86II::MO_GOTOFF;
    }

    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}


/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(1), AM))
    return false;

  return X86FastEmitStore(VT, I->getOperand(0), AM);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(0), AM))
    return false;

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, AM, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

static unsigned X86ChooseCmpOpcode(MVT VT) {
  switch (VT.getSimpleVT()) {
  default:       return 0;
  case MVT::i8:  return X86::CMP8rr;
  case MVT::i16: return X86::CMP16rr;
  case MVT::i32: return X86::CMP32rr;
  case MVT::i64: return X86::CMP64rr;
  case MVT::f32: return X86::UCOMISSrr;
  case MVT::f64: return X86::UCOMISDrr;
  }
}

/// X86ChooseCmpImmediateOpcode - If the RHS of the comparison is the constant
/// RHSC, return an opcode that can fold the immediate into the compare (e.g.
/// CMP32ri); otherwise return 0.
static unsigned X86ChooseCmpImmediateOpcode(MVT VT, ConstantInt *RHSC) {
  switch (VT.getSimpleVT()) {
  // Otherwise, we can't fold the immediate into this comparison.
  default: return 0;
  case MVT::i8:  return X86::CMP8ri;
  case MVT::i16: return X86::CMP16ri;
  case MVT::i32: return X86::CMP32ri;
  case MVT::i64:
    // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
    // field.
    if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
      return X86::CMP64ri32;
    return 0;
  }
}

bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, MVT VT) {
  unsigned Op0Reg = getRegForValue(Op0);
  if (Op0Reg == 0) return false;

  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Op1))
    Op1 = Op0->getContext().getNullValue(TD.getIntPtrType());

  // We have two options: compare with register or immediate.  If the RHS of
  // the compare is an immediate that we can fold into this compare, use
  // CMPri, otherwise use CMPrr.
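  // For example (illustrative IR): (icmp eq i32 %x, 7) folds to CMP32ri %x, 7,
  // while (icmp eq i32 %x, %y) needs both operands in registers: CMP32rr.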
  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
      BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
                                              .addImm(Op1C->getSExtValue());
      return true;
    }
  }

  unsigned CompareOpc = X86ChooseCmpOpcode(VT);
  if (CompareOpc == 0) return false;

  unsigned Op1Reg = getRegForValue(Op1);
  if (Op1Reg == 0) return false;
  BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);

  return true;
}

bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  unsigned SetCCOpc;
  bool SwapArgs;  // false -> compare Op0, Op1.  true -> compare Op1, Op0.
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
      return false;

    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, DL,
            TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case CmpInst::FCMP_UNE: {
    if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
      return false;

    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case CmpInst::FCMP_OGT: SwapArgs = false; SetCCOpc = X86::SETAr;  break;
  case CmpInst::FCMP_OGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
  case CmpInst::FCMP_OLT: SwapArgs = true;  SetCCOpc = X86::SETAr;  break;
  case CmpInst::FCMP_OLE: SwapArgs = true;  SetCCOpc = X86::SETAEr; break;
  case CmpInst::FCMP_ONE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
  case CmpInst::FCMP_ORD: SwapArgs = false; SetCCOpc = X86::SETNPr; break;
  case CmpInst::FCMP_UNO: SwapArgs = false; SetCCOpc = X86::SETPr;  break;
  case CmpInst::FCMP_UEQ: SwapArgs = false; SetCCOpc = X86::SETEr;  break;
  case CmpInst::FCMP_UGT: SwapArgs = true;  SetCCOpc = X86::SETBr;  break;
  case CmpInst::FCMP_UGE: SwapArgs = true;  SetCCOpc = X86::SETBEr; break;
  case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr;  break;
  case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;

  case CmpInst::ICMP_EQ:  SwapArgs = false; SetCCOpc = X86::SETEr;  break;
  case CmpInst::ICMP_NE:  SwapArgs = false; SetCCOpc = X86::SETNEr; break;
  case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr;  break;
  case CmpInst::ICMP_UGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
  case CmpInst::ICMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr;  break;
  case CmpInst::ICMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
  case CmpInst::ICMP_SGT: SwapArgs = false; SetCCOpc = X86::SETGr;  break;
  case CmpInst::ICMP_SGE: SwapArgs = false; SetCCOpc = X86::SETGEr; break;
  case CmpInst::ICMP_SLT: SwapArgs = false; SetCCOpc = X86::SETLr;  break;
  case CmpInst::ICMP_SLE: SwapArgs = false; SetCCOpc = X86::SETLEr; break;
  default:
    return false;
  }

  Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
  if (SwapArgs)
    std::swap(Op0, Op1);

  // Emit a compare of Op0/Op1.
  if (!X86FastEmitCompare(Op0, Op1, VT))
    return false;

  BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Handle zero-extension from i1 to i8, which is common.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    // Set the high bits to zero.
    ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg);
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}


bool X86FastISel::X86SelectBranch(Instruction *I) {
  // Unconditional branches are selected by tablegen-generated code.
  // Handle a conditional branch.
  BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

  // Fold the common case of a conditional branch with a comparison.
  if (CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse()) {
      MVT VT = TLI.getValueType(CI->getOperand(0)->getType());

      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (MBB->isLayoutSuccessor(TrueMBB)) {
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      bool SwapArgs;  // false -> compare Op0, Op1.  true -> compare Op1, Op0.
      unsigned BranchOpc; // Opcode to jump on, e.g. "X86::JA"
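      // Illustrative case (IR assumed): for a branch on "fcmp oeq", no single
      // flag test covers OEQ, so the switch below rewrites it as UNE with the
      // successors swapped, and a JP is emitted after the JNE to catch the
      // unordered case.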
"X86::JA" 812 813 switch (Predicate) { 814 case CmpInst::FCMP_OEQ: 815 std::swap(TrueMBB, FalseMBB); 816 Predicate = CmpInst::FCMP_UNE; 817 // FALL THROUGH 818 case CmpInst::FCMP_UNE: SwapArgs = false; BranchOpc = X86::JNE; break; 819 case CmpInst::FCMP_OGT: SwapArgs = false; BranchOpc = X86::JA; break; 820 case CmpInst::FCMP_OGE: SwapArgs = false; BranchOpc = X86::JAE; break; 821 case CmpInst::FCMP_OLT: SwapArgs = true; BranchOpc = X86::JA; break; 822 case CmpInst::FCMP_OLE: SwapArgs = true; BranchOpc = X86::JAE; break; 823 case CmpInst::FCMP_ONE: SwapArgs = false; BranchOpc = X86::JNE; break; 824 case CmpInst::FCMP_ORD: SwapArgs = false; BranchOpc = X86::JNP; break; 825 case CmpInst::FCMP_UNO: SwapArgs = false; BranchOpc = X86::JP; break; 826 case CmpInst::FCMP_UEQ: SwapArgs = false; BranchOpc = X86::JE; break; 827 case CmpInst::FCMP_UGT: SwapArgs = true; BranchOpc = X86::JB; break; 828 case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE; break; 829 case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB; break; 830 case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE; break; 831 832 case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE; break; 833 case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE; break; 834 case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA; break; 835 case CmpInst::ICMP_UGE: SwapArgs = false; BranchOpc = X86::JAE; break; 836 case CmpInst::ICMP_ULT: SwapArgs = false; BranchOpc = X86::JB; break; 837 case CmpInst::ICMP_ULE: SwapArgs = false; BranchOpc = X86::JBE; break; 838 case CmpInst::ICMP_SGT: SwapArgs = false; BranchOpc = X86::JG; break; 839 case CmpInst::ICMP_SGE: SwapArgs = false; BranchOpc = X86::JGE; break; 840 case CmpInst::ICMP_SLT: SwapArgs = false; BranchOpc = X86::JL; break; 841 case CmpInst::ICMP_SLE: SwapArgs = false; BranchOpc = X86::JLE; break; 842 default: 843 return false; 844 } 845 846 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1); 847 if (SwapArgs) 848 std::swap(Op0, Op1); 849 850 // Emit a compare of the LHS and RHS, setting the flags. 851 if (!X86FastEmitCompare(Op0, Op1, VT)) 852 return false; 853 854 BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB); 855 856 if (Predicate == CmpInst::FCMP_UNE) { 857 // X86 requires a second branch to handle UNE (and OEQ, 858 // which is mapped to UNE above). 859 BuildMI(MBB, DL, TII.get(X86::JP)).addMBB(TrueMBB); 860 } 861 862 FastEmitBranch(FalseMBB); 863 MBB->addSuccessor(TrueMBB); 864 return true; 865 } 866 } else if (ExtractValueInst *EI = 867 dyn_cast<ExtractValueInst>(BI->getCondition())) { 868 // Check to see if the branch instruction is from an "arithmetic with 869 // overflow" intrinsic. The main way these intrinsics are used is: 870 // 871 // %t = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) 872 // %sum = extractvalue { i32, i1 } %t, 0 873 // %obit = extractvalue { i32, i1 } %t, 1 874 // br i1 %obit, label %overflow, label %normal 875 // 876 // The %sum and %obit are converted in an ADD and a SETO/SETB before 877 // reaching the branch. Therefore, we search backwards through the MBB 878 // looking for the SETO/SETB instruction. If an instruction modifies the 879 // EFLAGS register before we reach the SETO/SETB instruction, then we can't 880 // convert the branch into a JO/JB instruction. 
    if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
      if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow ||
          CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
        const MachineInstr *SetMI = 0;
        unsigned Reg = lookUpRegForValue(EI);

        for (MachineBasicBlock::const_reverse_iterator
               RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
          const MachineInstr &MI = *RI;

          if (MI.modifiesRegister(Reg)) {
            unsigned Src, Dst, SrcSR, DstSR;

            if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
              Reg = Src;
              continue;
            }

            SetMI = &MI;
            break;
          }

          const TargetInstrDesc &TID = MI.getDesc();
          if (TID.hasUnmodeledSideEffects() ||
              TID.hasImplicitDefOfPhysReg(X86::EFLAGS))
            break;
        }

        if (SetMI) {
          unsigned OpCode = SetMI->getOpcode();

          if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
            BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ? X86::JO : X86::JB))
              .addMBB(TrueMBB);
            FastEmitBranch(FalseMBB);
            MBB->addSuccessor(TrueMBB);
            return true;
          }
        }
      }
    }
  }

  // Otherwise do a clumsy setcc and re-test it.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;

  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, DL, TII.get(X86::JNE)).addMBB(TrueMBB);
  FastEmitBranch(FalseMBB);
  MBB->addSuccessor(TrueMBB);
  return true;
}

bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0, OpReg = 0, OpImm = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::Int8Ty) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR8rCL; OpImm = X86::SHR8ri; break;
    case Instruction::AShr: OpReg = X86::SAR8rCL; OpImm = X86::SAR8ri; break;
    case Instruction::Shl:  OpReg = X86::SHL8rCL; OpImm = X86::SHL8ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int16Ty) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR16rCL; OpImm = X86::SHR16ri; break;
    case Instruction::AShr: OpReg = X86::SAR16rCL; OpImm = X86::SAR16ri; break;
    case Instruction::Shl:  OpReg = X86::SHL16rCL; OpImm = X86::SHL16ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int32Ty) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR32rCL; OpImm = X86::SHR32ri; break;
    case Instruction::AShr: OpReg = X86::SAR32rCL; OpImm = X86::SAR32ri; break;
    case Instruction::Shl:  OpReg = X86::SHL32rCL; OpImm = X86::SHL32ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int64Ty) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR64rCL; OpImm = X86::SHR64ri; break;
    case Instruction::AShr: OpReg = X86::SAR64rCL; OpImm = X86::SAR64ri; break;
    case Instruction::Shl:  OpReg = X86::SHL64rCL; OpImm = X86::SHL64ri; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;

  // Fold immediate in shl(x,3).
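  // e.g. (shl i32 %x, 3) becomes SHL32ri %x, 3 with no copy through CL.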
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = createResultReg(RC);
    BuildMI(MBB, DL, TII.get(OpImm),
            ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
    UpdateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);

  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
  // we're doing here.
  if (CReg != X86::CL)
    BuildMI(MBB, DL, TII.get(TargetInstrInfo::EXTRACT_SUBREG), X86::CL)
      .addReg(CReg).addImm(X86::SUBREG_8BIT);

  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectSelect(Instruction *I) {
  MVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
    return false;

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (VT.getSimpleVT() == MVT::i16) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (VT.getSimpleVT() == MVT::i32) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (VT.getSimpleVT() == MVT::i64) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectFPExt(Instruction *I) {
  // fpext from float to double.
  if (Subtarget->hasSSE2() && I->getType() == Type::DoubleTy) {
    Value *V = I->getOperand(0);
    if (V->getType() == Type::FloatTy) {
      unsigned OpReg = getRegForValue(V);
      if (OpReg == 0) return false;
      unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
      BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  return false;
}

bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::FloatTy) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::DoubleTy) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
        BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

bool X86FastISel::X86SelectTrunc(Instruction *I) {
  if (Subtarget->is64Bit())
    // All other cases should be handled by the tblgen generated code.
    return false;
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());

  // This code only handles truncation to byte right now.
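  // Background note: on x86-32, only EAX, EBX, ECX, and EDX have addressable
  // 8-bit subregisters, which is why the input is first copied into the
  // GR16_ABCD/GR32_ABCD classes below before the extract_subreg.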
  if (DstVT != MVT::i8 && DstVT != MVT::i1)
    // All other cases should be handled by the tblgen generated code.
    return false;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
    // All other cases should be handled by the tblgen generated code.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand.  Halt "fast" selection and bail.
    return false;

  // First issue a copy to GR16_ABCD or GR32_ABCD.
  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
    ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
  unsigned CopyReg = createResultReg(CopyRC);
  BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);

  // Then issue an extract_subreg.
  unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
                                                  CopyReg, X86::SUBREG_8BIT);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectExtractValue(Instruction *I) {
  ExtractValueInst *EI = cast<ExtractValueInst>(I);
  Value *Agg = EI->getAggregateOperand();

  if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
    switch (CI->getIntrinsicID()) {
    default: break;
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::uadd_with_overflow:
      // Cheat a little. We know that the registers for "add" and "seto" are
      // allocated sequentially. However, we only keep track of the register
      // for "add" in the value map. Use extractvalue's index to get the
      // correct register for "seto".
      UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
      return true;
    }
  }

  return false;
}

bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow: {
    // Replace "add with overflow" intrinsics with an "add" instruction
    // followed by a seto/setc instruction. Later on, when the "extractvalue"
    // instructions are encountered, we use the fact that two registers were
    // created sequentially to get the correct registers for the "sum" and the
    // "overflow bit".
    const Function *Callee = I.getCalledFunction();
    const Type *RetTy =
      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));

    MVT VT;
    if (!isTypeLegal(RetTy, VT))
      return false;

    Value *Op1 = I.getOperand(1);
    Value *Op2 = I.getOperand(2);
    unsigned Reg1 = getRegForValue(Op1);
    unsigned Reg2 = getRegForValue(Op2);

    if (Reg1 == 0 || Reg2 == 0)
      // FIXME: Handle values *not* in registers.
      return false;

    unsigned OpC = 0;
    if (VT == MVT::i32)
      OpC = X86::ADD32rr;
    else if (VT == MVT::i64)
      OpC = X86::ADD64rr;
    else
      return false;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
    unsigned DestReg1 = UpdateValueMap(&I, ResultReg);

    // If the add with overflow is an intra-block value then we just want to
    // create temporaries for it like normal.  If it is a cross-block value then
    // UpdateValueMap will return the cross-block register used.  Since we
    // *really* want the value to be live in the register pair known by
    // UpdateValueMap, we have to use DestReg1+1 as the destination register in
    // the cross-block case.  In the non-cross-block case, we should just make
    // another register for the value.
    if (DestReg1 != ResultReg)
      ResultReg = DestReg1 + 1;
    else
      ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));

    unsigned Opc = X86::SETBr;
    if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
      Opc = X86::SETOr;
    BuildMI(MBB, DL, TII.get(Opc), ResultReg);
    return true;
  }
  }
}

bool X86FastISel::X86SelectCall(Instruction *I) {
  CallInst *CI = cast<CallInst>(I);
  Value *Callee = I->getOperand(0);

  // Can't handle inline asm yet.
  if (isa<InlineAsm>(Callee))
    return false;

  // Handle intrinsic calls.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
    return X86VisitIntrinsicCall(*II);

  // Handle only C and fastcc calling conventions for now.
  CallSite CS(CI);
  unsigned CC = CS.getCallingConv();
  if (CC != CallingConv::C &&
      CC != CallingConv::Fast &&
      CC != CallingConv::X86_FastCall)
    return false;

  // On X86, -tailcallopt changes the fastcc ABI. FastISel doesn't
  // handle this for now.
  if (CC == CallingConv::Fast && PerformTailCallOpt)
    return false;

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = CS.getType();
  MVT RetVT;
  if (RetTy == Type::VoidTy)
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT, true))
    return false;

  // Materialize callee address in a register. FIXME: GV address can be
  // handled with a CALLpcrel32 instead.
  X86AddressMode CalleeAM;
  if (!X86SelectCallAddress(Callee, CalleeAM))
    return false;
  unsigned CalleeOp = 0;
  GlobalValue *GV = 0;
  if (CalleeAM.GV != 0) {
    GV = CalleeAM.GV;
  } else if (CalleeAM.Base.Reg != 0) {
    CalleeOp = CalleeAM.Base.Reg;
  } else
    return false;

  // Allow calls which produce i1 results.
  bool AndToI1 = false;
  if (RetVT == MVT::i1) {
    RetVT = MVT::i8;
    AndToI1 = true;
  }

  // Deal with call operands first.
  SmallVector<Value*, 8> ArgVals;
  SmallVector<unsigned, 8> Args;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgVals.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Arg);
    ArgVals.push_back(*i);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
  BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);

  // Process arguments: walk the register/memloc assignments, inserting
  // copies / loads.
  SmallVector<unsigned, 4> RegArgs;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = Args[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
      ArgVT = VA.getLocVT();
      break;
    }
    }

    if (VA.isRegLoc()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
      bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
                                      Arg, RC, RC);
      assert(Emitted && "Failed to emit a copy instruction!"); (void)Emitted;
      RegArgs.push_back(VA.getLocReg());
    } else {
      unsigned LocMemOffset = VA.getLocMemOffset();
      X86AddressMode AM;
      AM.Base.Reg = StackPtr;
      AM.Disp = LocMemOffset;
      Value *ArgVal = ArgVals[VA.getValNo()];

      // If this is a really simple value, emit this with the Value* version of
      // X86FastEmitStore.  If it isn't simple, we don't want to do this, as it
      // can cause us to reevaluate the argument.
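      // For example (illustrative), passing the constant i32 42 on the stack
      // becomes a single MOV32mi to [StackPtr + LocMemOffset] via the Value*
      // path, with no register materialization.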
      if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal))
        X86FastEmitStore(ArgVT, ArgVal, AM);
      else
        X86FastEmitStore(ArgVT, Arg, AM);
    }
  }

  // ELF / PIC requires the GOT pointer to be in the EBX register before
  // function calls made via the PLT.
  if (Subtarget->isPICStyleGOT()) {
    TargetRegisterClass *RC = X86::GR32RegisterClass;
    unsigned Base = getInstrInfo()->getGlobalBaseReg(&MF);
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC);
    assert(Emitted && "Failed to emit a copy instruction!"); (void)Emitted;
  }

  // Issue the call.
  MachineInstrBuilder MIB;
  if (CalleeOp) {
    // Register-indirect call.
    unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
    MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);

  } else {
    // Direct call.
    assert(GV && "Not a direct call");
    unsigned CallOpc =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;

    // See if we need any target-specific flags on the GV operand.
    unsigned char OpFlags = 0;

    // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
    // external symbols must go through the PLT in PIC mode.  If the symbol
    // has hidden or protected visibility, or if it is static or local, then
    // we don't need to use the PLT - we can directly call it.
    if (Subtarget->isTargetELF() &&
        TM.getRelocationModel() == Reloc::PIC_ &&
        GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
      OpFlags = X86II::MO_PLT;
    } else if (Subtarget->isPICStyleStubAny() &&
               (GV->isDeclaration() || GV->isWeakForLinker()) &&
               Subtarget->getDarwinVers() < 9) {
      // PC-relative references to external symbols should go through $stub,
      // unless we're building with the leopard linker or later, which
      // automatically synthesizes these stubs.
      OpFlags = X86II::MO_DARWIN_STUB;
    }

    MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
  }

  // Add an implicit use of the GOT pointer in EBX.
  if (Subtarget->isPICStyleGOT())
    MIB.addReg(X86::EBX);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
  BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);

  // Now handle call return value (if any).
  if (RetVT.getSimpleVT() != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
    CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);

    // Copy all of the result registers out of their specified physreg.
    assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
    MVT CopyVT = RVLocs[0].getValVT();
    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
    TargetRegisterClass *SrcRC = DstRC;

    // If this is a call to a function that returns an fp value on the x87 fp
    // stack, but where we prefer to use the value in xmm registers, copy it
    // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
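    // A sketch of the resulting sequence for a float return (illustrative,
    // matching the code below):
    //   copy ST0 -> %fp80            ; into an RFP80 virtual register
    //   ST_Fp80m32 [slot], %fp80     ; store, rounding f80 down to f32
    //   MOVSSrm %xmm, [slot]         ; reload into an SSE register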
    if ((RVLocs[0].getLocReg() == X86::ST0 ||
         RVLocs[0].getLocReg() == X86::ST1) &&
        isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
      CopyVT = MVT::f80;
      SrcRC = X86::RSTRegisterClass;
      DstRC = X86::RFP80RegisterClass;
    }

    unsigned ResultReg = createResultReg(DstRC);
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                    RVLocs[0].getLocReg(), DstRC, SrcRC);
    assert(Emitted && "Failed to emit a copy instruction!"); (void)Emitted;
    if (CopyVT != RVLocs[0].getValVT()) {
      // Round the F80 to the right size, which also moves it to the
      // appropriate xmm register. This is accomplished by storing the F80
      // value in memory and then loading it back. Ewww...
      MVT ResVT = RVLocs[0].getValVT();
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      unsigned MemSize = ResVT.getSizeInBits()/8;
      int FI = MFI.CreateStackObject(MemSize, MemSize);
      addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
      DstRC = ResVT == MVT::f32
        ? X86::FR32RegisterClass : X86::FR64RegisterClass;
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
      ResultReg = createResultReg(DstRC);
      addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
    }

    if (AndToI1) {
      // Mask out all but the lowest bit for a call which produces an i1.
      unsigned AndResult = createResultReg(X86::GR8RegisterClass);
      BuildMI(MBB, DL,
              TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
      ResultReg = AndResult;
    }

    UpdateValueMap(I, ResultReg);
  }

  return true;
}


bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::Call:
    return X86SelectCall(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  case Instruction::ExtractValue:
    return X86SelectExtractValue(I);
  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    MVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return X86SelectZExt(I);
    if (DstVT.bitsLT(SrcVT))
      return X86SelectTrunc(I);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }
  }

  return false;
}

unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
  MVT VT;
  if (!isTypeLegal(C->getType(), VT))
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC  = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC  = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  // Materialize addresses with LEA instructions.
  if (isa<GlobalValue>(C)) {
    X86AddressMode AM;
    if (X86SelectAddress(C, AM)) {
      if (TLI.getPointerTy() == MVT::i32)
        Opc = X86::LEA32r;
      else
        Opc = X86::LEA64r;
      unsigned ResultReg = createResultReg(RC);
      addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
      return ResultReg;
    }
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // Alignment of vector types.  FIXME!
    Align = TD.getTypeAllocSize(C->getType());
  }

  // x86-32 PIC requires a PIC base register for constant pools.
  unsigned PICBase = 0;
  unsigned char OpFlag = 0;
  if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
    OpFlag = X86II::MO_PIC_BASE_OFFSET;
    PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOTOFF;
    PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
  } else if (Subtarget->isPICStyleRIPRel() &&
             TM.getCodeModel() == CodeModel::Small) {
    PICBase = X86::RIP;
  }

  // Create the load from the constant pool.
  unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
  unsigned ResultReg = createResultReg(RC);
  addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
                           MCPOffset, PICBase, OpFlag);

  return ResultReg;
}

unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
  // Fail on dynamic allocas. At this point, getRegForValue has already
  // checked its CSE maps, so if we're here trying to handle a dynamic
  // alloca, we're not going to succeed. X86SelectAddress has a
  // check for dynamic allocas, because it's called directly from
  // various places, but TargetMaterializeAlloca also needs a check
  // in order to avoid recursion between getRegForValue,
  // X86SelectAddress, and TargetMaterializeAlloca.
  if (!StaticAllocaMap.count(C))
    return 0;

  X86AddressMode AM;
  if (!X86SelectAddress(C, AM))
    return 0;
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
  unsigned ResultReg = createResultReg(RC);
  addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
  return ResultReg;
}

namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                                      MachineModuleInfo *mmi,
                                      DwarfWriter *dw,
                                      DenseMap<const Value *, unsigned> &vm,
                                      DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                                      DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                                      , SmallSet<Instruction*, 8> &cil
#endif
                                      ) {
    return new X86FastISel(mf, mmi, dw, vm, bm, am
#ifndef NDEBUG
                           , cil
#endif
                           );
  }
}