X86FastISel.cpp revision 7962e856d1460cc61ce246e31ca77e99f85228a3
//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"

using namespace llvm;

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// StackPtr - Register used as the stack pointer.
  ///
  unsigned StackPtr;

  /// GlobalBaseReg - keeps track of the virtual register mapped onto the
  /// global base register.
  unsigned GlobalBaseReg;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(MachineFunction &mf,
                       MachineModuleInfo *mmi,
                       DenseMap<const Value *, unsigned> &vm,
                       DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                       DenseMap<const AllocaInst *, int> &am)
    : FastISel(mf, mmi, vm, bm, am) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
    StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
    GlobalBaseReg = 0;
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
  }

  virtual bool TargetSelectInstruction(Instruction *I);

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);

  bool X86FastEmitStore(MVT VT, unsigned Val,
                        const X86AddressMode &AM);

  bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall);

  bool X86SelectLoad(Instruction *I);

  bool X86SelectStore(Instruction *I);

  bool X86SelectCmp(Instruction *I);

  bool X86SelectZExt(Instruction *I);

  bool X86SelectBranch(Instruction *I);

  bool X86SelectShift(Instruction *I);

  bool X86SelectSelect(Instruction *I);

  bool X86SelectTrunc(Instruction *I);

  bool X86SelectFPExt(Instruction *I);
  bool X86SelectFPTrunc(Instruction *I);

  bool X86SelectCall(Instruction *I);

  CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);

  unsigned getGlobalBaseReg();

  const X86InstrInfo *getInstrInfo() const {
    return getTargetMachine()->getInstrInfo();
  }
  const X86TargetMachine *getTargetMachine() const {
    return static_cast<const X86TargetMachine *>(&TM);
  }

  unsigned TargetMaterializeConstant(Constant *C);

  unsigned TargetMaterializeAlloca(AllocaInst *C);

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(MVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is legal when SSE2.
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is legal when SSE1.
  }

};
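
// Each X86Select* routine below either emits machine code for the given IR
// instruction and returns true, or returns false, in which case "fast"
// selection halts and the instruction falls back to the normal
// SelectionDAG-based selector.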

static bool isTypeLegal(const Type *Ty, const TargetLowering &TLI, MVT &VT,
                        bool AllowI1 = false) {
  VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  if (VT == MVT::iPTR)
    // Use pointer type.
    VT = TLI.getPointerTy();
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}

/// getGlobalBaseReg - Return the global base register. Output
/// instructions required to initialize the global base register, if necessary.
///
unsigned X86FastISel::getGlobalBaseReg() {
  assert(!Subtarget->is64Bit() && "X86-64 PIC uses RIP relative addressing");
  if (!GlobalBaseReg)
    GlobalBaseReg = getInstrInfo()->initializeGlobalBaseReg(MBB->getParent());
  return GlobalBaseReg;
}

#include "X86GenCallingConv.inc"
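
// The CC_X86_* assignment functions returned below are generated by tablegen
// from X86CallingConv.td and pulled in through X86GenCallingConv.inc above;
// each one assigns a single argument to a register or a stack slot, as seen
// in the AnalyzeCallOperands and AnalyzeCallResult calls in X86SelectCall.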

/// CCAssignFnForCall - Selects the correct CCAssignFn for a given calling
/// convention.
CCAssignFn *X86FastISel::CCAssignFnForCall(unsigned CC, bool isTailCall) {
  if (Subtarget->is64Bit()) {
    if (Subtarget->isTargetWin64())
      return CC_X86_Win64_C;
    else if (CC == CallingConv::Fast && isTailCall)
      return CC_X86_64_TailCall;
    else
      return CC_X86_64_C;
  }

  if (CC == CallingConv::X86_FastCall)
    return CC_X86_32_FastCall;
  else if (CC == CallingConv::Fast)
    return CC_X86_32_FastCC;
  else
    return CC_X86_32_C;
}

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return false;
  }

  ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base ptr,
/// Ptr, and a displacement offset, or a GlobalAddress, i.e. V. Return true
/// if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
                              const X86AddressMode &AM) {
  // Get opcode and regclass of the output for the given store instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return false;
  case MVT::i8:
    Opc = X86::MOV8mr;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16mr;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32mr;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64mr;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSmr;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::ST_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDmr;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::ST_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return false;
  }

  addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Val);
  return true;
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
                                    unsigned Src, MVT SrcVT,
                                    unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
  if (RR == 0)
    return false;
  ResultReg = RR;
  return true;
}
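
// An X86AddressMode describes a full x86 memory operand of the form
//   BaseReg (or a frame index) + Scale*IndexReg + Disp32 [+ GV]
// where Scale is 1, 2, 4, or 8 and Disp32 is a signed 32-bit displacement.
// X86SelectAddress below fills one in by walking backwards through the
// pointer computation. For example (illustrative only), a GEP such as
//   getelementptr i32* %p, i32 %i
// folds to Base = %p, Scale = 4, Index = %i, Disp = 0, so a load of its
// result becomes a single MOV32rm from [%p + 4*%i].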

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall) {
  User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();
    U = I;
  } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectAddress(U->getOperand(0), AM, isCall);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM, isCall);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM, isCall);
    break;

  case Instruction::Alloca: {
    if (isCall) break;
    // Do static allocas.
    const AllocaInst *A = cast<AllocaInst>(V);
    DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
    if (SI != StaticAllocaMap.end()) {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = SI->second;
      return true;
    }
    break;
  }

  case Instruction::Add: {
    if (isCall) break;
    // Adds of constants are common and easy enough.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
      // They have to fit in the 32-bit signed displacement field though.
      if (isInt32(Disp)) {
        AM.Disp = (uint32_t)Disp;
        return X86SelectAddress(U->getOperand(0), AM, isCall);
      }
    }
    break;
  }

  case Instruction::GetElementPtr: {
    if (isCall) break;
    // Pattern-match simple GEPs.
    uint64_t Disp = (int32_t)AM.Disp;
    unsigned IndexReg = AM.IndexReg;
    unsigned Scale = AM.Scale;
    gep_type_iterator GTI = gep_type_begin(U);
    // Look at all but the last index. Constants can be folded,
    // and one dynamic index can be handled, if the scale is supported.
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      Value *Op = *i;
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        Disp += SL->getElementOffset(Idx);
      } else {
        uint64_t S = TD.getABITypeSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          // Constant-offset addressing.
          Disp += CI->getSExtValue() * S;
        } else if (IndexReg == 0 &&
                   (!AM.GV ||
                    !getTargetMachine()->symbolicAddressesAreRIPRel()) &&
                   (S == 1 || S == 2 || S == 4 || S == 8)) {
          // Scaled-index addressing.
          Scale = S;
          IndexReg = getRegForValue(Op);
          if (IndexReg == 0)
            return false;
        } else
          // Unsupported.
          goto unsupported_gep;
      }
    }
    // Check for displacement overflow.
    if (!isInt32(Disp))
      break;
    // Ok, the GEP indices were covered by constant-offset and scaled-index
    // addressing. Update the address state and move on to examining the base.
    AM.IndexReg = IndexReg;
    AM.Scale = Scale;
    AM.Disp = (uint32_t)Disp;
    return X86SelectAddress(U->getOperand(0), AM, isCall);
  unsupported_gep:
    // Ok, the GEP indices weren't all covered.
    break;
  }
  }

  // Handle constant address.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Default &&
        TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (getTargetMachine()->symbolicAddressesAreRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Set up the basic address.
    AM.GV = GV;
    if (!isCall &&
        TM.getRelocationModel() == Reloc::PIC_ &&
        !Subtarget->is64Bit())
      AM.Base.Reg = getGlobalBaseReg();

    // Emit an extra load if the ABI requires it.
    if (Subtarget->GVRequiresExtraLoad(GV, TM, isCall)) {
      // Check to see if we've already materialized this
      // value in a register in this block.
      if (unsigned Reg = LocalValueMap[V]) {
        AM.Base.Reg = Reg;
        AM.GV = 0;
        return true;
      }
      // Issue load from stub if necessary.
      unsigned Opc = 0;
      const TargetRegisterClass *RC = NULL;
      if (TLI.getPointerTy() == MVT::i32) {
        Opc = X86::MOV32rm;
        RC = X86::GR32RegisterClass;
      } else {
        Opc = X86::MOV64rm;
        RC = X86::GR64RegisterClass;
      }

      X86AddressMode StubAM;
      StubAM.Base.Reg = AM.Base.Reg;
      StubAM.GV = AM.GV;
      unsigned ResultReg = createResultReg(RC);
      addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), StubAM);

      // Now construct the final address. Note that the Disp, Scale,
      // and Index values may already be set here.
      AM.Base.Reg = ResultReg;
      AM.GV = 0;

      // Prevent loading the GV stub multiple times in the same MBB.
      LocalValueMap[V] = AM.Base.Reg;
    }
    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !getTargetMachine()->symbolicAddressesAreRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction* I) {
  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), TLI, VT))
    return false;
  unsigned Val = getRegForValue(I->getOperand(0));
  if (Val == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(1), AM, false))
    return false;

  return X86FastEmitStore(VT, Val, AM);
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), TLI, VT))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(0), AM, false))
    return false;

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, AM, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}
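
// The floating-point compares below are built on UCOMISS/UCOMISD, which set
// ZF/PF/CF to 1/1/1 for unordered, 1/0/0 for equal, 0/0/1 for less-than, and
// 0/0/0 for greater-than. PF is the only flag that distinguishes an unordered
// result, so FCMP_OEQ combines SETE with SETNP (AND) and FCMP_UNE combines
// SETNE with SETP (OR). Predicates where a NaN and a "less" result would both
// leave CF set are handled by swapping the operands so that SETA/SETAE (or
// SETB/SETBE for the unordered forms) test the right condition.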

bool X86FastISel::X86SelectCmp(Instruction *I) {
  CmpInst *CI = cast<CmpInst>(I);

  MVT VT = TLI.getValueType(I->getOperand(0)->getType());
  if (!TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(CI->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(CI->getOperand(1));
  if (Op1Reg == 0) return false;

  unsigned Opc;
  switch (VT.getSimpleVT()) {
  case MVT::i8: Opc = X86::CMP8rr; break;
  case MVT::i16: Opc = X86::CMP16rr; break;
  case MVT::i32: Opc = X86::CMP32rr; break;
  case MVT::i64: Opc = X86::CMP64rr; break;
  case MVT::f32: Opc = X86::UCOMISSrr; break;
  case MVT::f64: Opc = X86::UCOMISDrr; break;
  default: return false;
  }

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), EReg);
    BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
    BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    break;
  }
  case CmpInst::FCMP_UNE: {
    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
    BuildMI(MBB, TII.get(X86::SETPr), PReg);
    BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
    break;
  }
  case CmpInst::FCMP_OGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_OLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::FCMP_OLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::FCMP_ONE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::FCMP_ORD:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
    break;
  case CmpInst::FCMP_UNO:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
    break;
  case CmpInst::FCMP_UEQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::FCMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::FCMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::FCMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_EQ:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
    break;
  case CmpInst::ICMP_NE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
    break;
  case CmpInst::ICMP_UGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
    break;
  case CmpInst::ICMP_UGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
    break;
  case CmpInst::ICMP_ULT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
    break;
  case CmpInst::ICMP_ULE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
    break;
  case CmpInst::ICMP_SGT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
    break;
  case CmpInst::ICMP_SGE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
    break;
  case CmpInst::ICMP_SLT:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
    break;
  case CmpInst::ICMP_SLE:
    BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
    BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
    break;
  default:
    return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(Instruction *I) {
  // Special-case hack: The only i1 values we know how to produce currently
  // set the upper bits of an i8 value to zero.
  if (I->getType() == Type::Int8Ty &&
      I->getOperand(0)->getType() == Type::Int1Ty) {
    unsigned ResultReg = getRegForValue(I->getOperand(0));
    if (ResultReg == 0) return false;
    UpdateValueMap(I, ResultReg);
    return true;
  }

  return false;
}

bool X86FastISel::X86SelectBranch(Instruction *I) {
  BranchInst *BI = cast<BranchInst>(I);
  // Unconditional branches are selected by tablegen-generated code.
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;
  MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];

  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
  BuildMI(MBB, TII.get(X86::JNE)).addMBB(TrueMBB);
  BuildMI(MBB, TII.get(X86::JMP)).addMBB(FalseMBB);

  MBB->addSuccessor(TrueMBB);
  MBB->addSuccessor(FalseMBB);

  return true;
}
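
// x86 variable-count shifts take their count in CL (the low byte of
// CX/ECX/RCX), so X86SelectShift copies a non-constant shift amount into
// CReg and then issues the *rCL form of the opcode, which reads CL
// implicitly. A constant shift amount is folded into the *ri immediate form
// instead.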

bool X86FastISel::X86SelectShift(Instruction *I) {
  unsigned CReg = 0, OpReg = 0, OpImm = 0;
  const TargetRegisterClass *RC = NULL;
  if (I->getType() == Type::Int8Ty) {
    CReg = X86::CL;
    RC = &X86::GR8RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR8rCL; OpImm = X86::SHR8ri; break;
    case Instruction::AShr: OpReg = X86::SAR8rCL; OpImm = X86::SAR8ri; break;
    case Instruction::Shl:  OpReg = X86::SHL8rCL; OpImm = X86::SHL8ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int16Ty) {
    CReg = X86::CX;
    RC = &X86::GR16RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR16rCL; OpImm = X86::SHR16ri; break;
    case Instruction::AShr: OpReg = X86::SAR16rCL; OpImm = X86::SAR16ri; break;
    case Instruction::Shl:  OpReg = X86::SHL16rCL; OpImm = X86::SHL16ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int32Ty) {
    CReg = X86::ECX;
    RC = &X86::GR32RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR32rCL; OpImm = X86::SHR32ri; break;
    case Instruction::AShr: OpReg = X86::SAR32rCL; OpImm = X86::SAR32ri; break;
    case Instruction::Shl:  OpReg = X86::SHL32rCL; OpImm = X86::SHL32ri; break;
    default: return false;
    }
  } else if (I->getType() == Type::Int64Ty) {
    CReg = X86::RCX;
    RC = &X86::GR64RegClass;
    switch (I->getOpcode()) {
    case Instruction::LShr: OpReg = X86::SHR64rCL; OpImm = X86::SHR64ri; break;
    case Instruction::AShr: OpReg = X86::SAR64rCL; OpImm = X86::SAR64ri; break;
    case Instruction::Shl:  OpReg = X86::SHL64rCL; OpImm = X86::SHL64ri; break;
    default: return false;
    }
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;

  // Fold immediate shift amounts, e.g. the 3 in shl(x,3).
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = createResultReg(RC);
    BuildMI(MBB, TII.get(OpImm),
            ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue());
    UpdateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(OpReg), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}
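
// X86SelectSelect lowers a select to TEST8rr on the condition byte followed
// by a CMOVE. CMOVcc ties its destination to the first source operand and
// only copies the second when the condition holds, so move-if-equal (ZF set,
// i.e. the tested condition byte was zero) overwrites the true value with
// the false value exactly when the select condition is false.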

bool X86FastISel::X86SelectSelect(Instruction *I) {
  const Type *Ty = I->getType();
  if (isa<PointerType>(Ty))
    Ty = TD.getIntPtrType();

  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  if (Ty == Type::Int16Ty) {
    Opc = X86::CMOVE16rr;
    RC = &X86::GR16RegClass;
  } else if (Ty == Type::Int32Ty) {
    Opc = X86::CMOVE32rr;
    RC = &X86::GR32RegClass;
  } else if (Ty == Type::Int64Ty) {
    Opc = X86::CMOVE64rr;
    RC = &X86::GR64RegClass;
  } else {
    return false;
  }

  MVT VT = MVT::getMVT(Ty, /*HandleUnknown=*/true);
  if (VT == MVT::Other || !TLI.isTypeLegal(VT))
    return false;

  unsigned Op0Reg = getRegForValue(I->getOperand(0));
  if (Op0Reg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;

  BuildMI(MBB, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
  BuildMI(MBB, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectFPExt(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::DoubleTy) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::FloatTy) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
        BuildMI(MBB, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}

bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
  if (Subtarget->hasSSE2()) {
    if (I->getType() == Type::FloatTy) {
      Value *V = I->getOperand(0);
      if (V->getType() == Type::DoubleTy) {
        unsigned OpReg = getRegForValue(V);
        if (OpReg == 0) return false;
        unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
        BuildMI(MBB, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
        UpdateValueMap(I, ResultReg);
        return true;
      }
    }
  }

  return false;
}
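
// Only EAX, EBX, ECX, and EDX have addressable 8-bit subregisters in 32-bit
// mode, so X86SelectTrunc first copies the source into the restricted
// GR16_/GR32_ register classes (which contain exactly those registers); the
// extract_subreg of the low byte is then always legal.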

bool X86FastISel::X86SelectTrunc(Instruction *I) {
  if (Subtarget->is64Bit())
    // All other cases should be handled by the tblgen generated code.
    return false;
  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  MVT DstVT = TLI.getValueType(I->getType());
  if (DstVT != MVT::i8)
    // All other cases should be handled by the tblgen generated code.
    return false;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
    // All other cases should be handled by the tblgen generated code.
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First issue a copy to GR16_ or GR32_.
  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16to16_ : X86::MOV32to32_;
  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
    ? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
  unsigned CopyReg = createResultReg(CopyRC);
  BuildMI(MBB, TII.get(CopyOpc), CopyReg).addReg(InputReg);

  // Then issue an extract_subreg.
  unsigned ResultReg = FastEmitInst_extractsubreg(CopyReg, 1); // x86_subreg_8bit
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectCall(Instruction *I) {
  CallInst *CI = cast<CallInst>(I);
  Value *Callee = I->getOperand(0);

  // Can't handle inline asm yet.
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: Handle some intrinsics.
  if (Function *F = CI->getCalledFunction()) {
    if (F->isDeclaration() && F->getIntrinsicID())
      return false;
  }

  // Handle only C and fastcc calling conventions for now.
  CallSite CS(CI);
  unsigned CC = CS.getCallingConv();
  if (CC != CallingConv::C &&
      CC != CallingConv::Fast &&
      CC != CallingConv::X86_FastCall)
    return false;

  // Let SDISel handle vararg functions.
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  const Type *RetTy = CS.getType();
  MVT RetVT;
  if (RetTy == Type::VoidTy)
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, TLI, RetVT, true))
    return false;

  // Materialize callee address in a register. FIXME: GV address can be
  // handled with a CALLpcrel32 instead.
  X86AddressMode CalleeAM;
  if (!X86SelectAddress(Callee, CalleeAM, true))
    return false;
  unsigned CalleeOp = 0;
  GlobalValue *GV = 0;
  if (CalleeAM.Base.Reg != 0) {
    assert(CalleeAM.GV == 0);
    CalleeOp = CalleeAM.Base.Reg;
  } else if (CalleeAM.GV != 0) {
    GV = CalleeAM.GV;
  } else
    return false;

  // Allow calls which produce i1 results.
  bool AndToI1 = false;
  if (RetVT == MVT::i1) {
    RetVT = MVT::i8;
    AndToI1 = true;
  }

  // Deal with call operands first.
  SmallVector<unsigned, 4> Args;
  SmallVector<MVT, 4> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 4> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    const Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, TLI, ArgVT))
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, TM, ArgLocs);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START.
  BuildMI(MBB, TII.get(X86::ADJCALLSTACKDOWN)).addImm(NumBytes);

  // Process arguments: walk the register/memloc assignments, inserting
  // copies / loads.
  SmallVector<unsigned, 4> RegArgs;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = Args[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: assert(0 && "Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt: {
      bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a sext!");
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::ZExt: {
      bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      assert(Emitted && "Failed to emit a zext!");
      ArgVT = VA.getLocVT();
      break;
    }
    case CCValAssign::AExt: {
      bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
                                       Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);
      if (!Emitted)
        Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
                                    Arg, ArgVT, Arg);

      assert(Emitted && "Failed to emit an aext!");
      ArgVT = VA.getLocVT();
      break;
    }
    }

    if (VA.isRegLoc()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
      bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
                                      Arg, RC, RC);
      assert(Emitted && "Failed to emit a copy instruction!");
      RegArgs.push_back(VA.getLocReg());
    } else {
      unsigned LocMemOffset = VA.getLocMemOffset();
      X86AddressMode AM;
      AM.Base.Reg = StackPtr;
      AM.Disp = LocMemOffset;
      X86FastEmitStore(ArgVT, Arg, AM);
    }
  }

  // ELF / PIC requires the GOT pointer in the EBX register before function
  // calls made via the PLT.
  if (!Subtarget->is64Bit() &&
      TM.getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT()) {
    TargetRegisterClass *RC = X86::GR32RegisterClass;
    unsigned Base = getGlobalBaseReg();
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC);
    assert(Emitted && "Failed to emit a copy instruction!");
  }

  // Issue the call.
  unsigned CallOpc = CalleeOp
    ? (Subtarget->is64Bit() ? X86::CALL64r       : X86::CALL32r)
    : (Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32);
  MachineInstrBuilder MIB = CalleeOp
    ? BuildMI(MBB, TII.get(CallOpc)).addReg(CalleeOp)
    : BuildMI(MBB, TII.get(CallOpc)).addGlobalAddress(GV);
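
  // The registers added to MIB below (EBX and the argument registers) are
  // implicit uses: they do not affect the CALL's encoding, but they tell the
  // register allocator and later passes that these physical registers are
  // live into the call and must not be clobbered before it.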

  // Add an implicit use of the GOT pointer in EBX.
  if (!Subtarget->is64Bit() &&
      TM.getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    MIB.addReg(X86::EBX);

  // Add implicit physical register uses to the call.
  while (!RegArgs.empty()) {
    MIB.addReg(RegArgs.back());
    RegArgs.pop_back();
  }

  // Issue CALLSEQ_END.
  BuildMI(MBB, TII.get(X86::ADJCALLSTACKUP)).addImm(NumBytes).addImm(0);

  // Now handle call return value (if any).
  if (RetVT.getSimpleVT() != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, TM, RVLocs);
    CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);

    // Copy all of the result registers out of their specified physreg.
    assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
    MVT CopyVT = RVLocs[0].getValVT();
    TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
    TargetRegisterClass *SrcRC = DstRC;

    // If this is a call to a function that returns an fp value on the x87 fp
    // stack, but where we prefer to use the value in xmm registers, copy it
    // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
    if ((RVLocs[0].getLocReg() == X86::ST0 ||
         RVLocs[0].getLocReg() == X86::ST1) &&
        isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
      CopyVT = MVT::f80;
      SrcRC = X86::RSTRegisterClass;
      DstRC = X86::RFP80RegisterClass;
    }

    unsigned ResultReg = createResultReg(DstRC);
    bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                    RVLocs[0].getLocReg(), DstRC, SrcRC);
    assert(Emitted && "Failed to emit a copy instruction!");
    if (CopyVT != RVLocs[0].getValVT()) {
      // Round the F80 value to the right size, which also moves it to the
      // appropriate xmm register. This is accomplished by storing the F80
      // value in memory and then loading it back. Ewww...
      MVT ResVT = RVLocs[0].getValVT();
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      unsigned MemSize = ResVT.getSizeInBits()/8;
      int FI = MFI.CreateStackObject(MemSize, MemSize);
      addFrameReference(BuildMI(MBB, TII.get(Opc)), FI).addReg(ResultReg);
      DstRC = ResVT == MVT::f32
        ? X86::FR32RegisterClass : X86::FR64RegisterClass;
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
      ResultReg = createResultReg(DstRC);
      addFrameReference(BuildMI(MBB, TII.get(Opc), ResultReg), FI);
    }

    if (AndToI1) {
      // Mask out all but the lowest bit for calls which produce an i1.
      unsigned AndResult = createResultReg(X86::GR8RegisterClass);
      BuildMI(MBB, TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
      ResultReg = AndResult;
    }

    UpdateValueMap(I, ResultReg);
  }

  return true;
}

bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
  case Instruction::ZExt:
    return X86SelectZExt(I);
  case Instruction::Br:
    return X86SelectBranch(I);
  case Instruction::Call:
    return X86SelectCall(I);
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::Shl:
    return X86SelectShift(I);
  case Instruction::Select:
    return X86SelectSelect(I);
  case Instruction::Trunc:
    return X86SelectTrunc(I);
  case Instruction::FPExt:
    return X86SelectFPExt(I);
  case Instruction::FPTrunc:
    return X86SelectFPTrunc(I);
  }

  return false;
}

unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
  MVT VT;
  if (!isTypeLegal(C->getType(), TLI, VT))
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT()) {
  default: return 0;
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC = X86::GR8RegisterClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC = X86::GR16RegisterClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC = X86::GR32RegisterClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC = X86::GR64RegisterClass;
    break;
  case MVT::f32:
    if (Subtarget->hasSSE1()) {
      Opc = X86::MOVSSrm;
      RC = X86::FR32RegisterClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC = X86::RFP32RegisterClass;
    }
    break;
  case MVT::f64:
    if (Subtarget->hasSSE2()) {
      Opc = X86::MOVSDrm;
      RC = X86::FR64RegisterClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC = X86::RFP64RegisterClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  // Materialize addresses with LEA instructions.
  if (isa<GlobalValue>(C)) {
    X86AddressMode AM;
    if (X86SelectAddress(C, AM, false)) {
      if (TLI.getPointerTy() == MVT::i32)
        Opc = X86::LEA32r;
      else
        Opc = X86::LEA64r;
      unsigned ResultReg = createResultReg(RC);
      addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
      return ResultReg;
    }
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPreferredTypeAlignmentShift(C->getType());
  if (Align == 0) {
    // Alignment of vector types. FIXME!
    Align = TD.getABITypeSize(C->getType());
    Align = Log2_64(Align);
  }

  unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
  unsigned ResultReg = createResultReg(RC);
  addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
  return ResultReg;
}

unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
  X86AddressMode AM;
  if (!X86SelectAddress(C, AM, false))
    return 0;
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
  unsigned ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
  return ResultReg;
}

namespace llvm {
  llvm::FastISel *X86::createFastISel(MachineFunction &mf,
                        MachineModuleInfo *mmi,
                        DenseMap<const Value *, unsigned> &vm,
                        DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                        DenseMap<const AllocaInst *, int> &am) {
    return new X86FastISel(mf, mmi, vm, bm, am);
  }
}