X86ISelDAGToDAG.cpp revision 6726b6d75a8b679068a58cb954ba97cf9d1690ba
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

#include "llvm/Support/CommandLine.h"
static cl::opt<bool> AvoidDupAddrCompute("x86-avoid-dup-address", cl::Hidden);

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    struct {            // This is really a union, discriminated by BaseType!
      SDValue Reg;
      int FrameIndex;
    } Base;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    GlobalValue *GV;
    Constant *CP;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base.Reg = Reg;
    }

    void dump() {
      errs() << "X86ISelAddressMode " << this << '\n';
      errs() << "Base.Reg ";
      if (Base.Reg.getNode() != 0)
        Base.Reg.getNode()->dump();
      else
        errs() << "nul";
      errs() << " Base.FrameIndex " << Base.FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        errs() << "nul";
      errs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        errs() << "nul";
      errs() << " CP ";
      if (CP)
        CP->dump();
      else
        errs() << "nul";
      errs() << '\n'
             << "ES ";
      if (ES)
        errs() << ES;
      else
        errs() << "nul";
      errs() << " JT" << JT << " Align" << Align << '\n';
    }
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    /// InstructionSelect - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelect();

    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

    virtual
    bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;

// Include the pieces autogenerated from the target description.
184#include "X86GenDAGISel.inc" 185 186 private: 187 SDNode *Select(SDValue N); 188 SDNode *SelectAtomic64(SDNode *Node, unsigned Opc); 189 SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT); 190 191 bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM); 192 bool MatchLoad(SDValue N, X86ISelAddressMode &AM); 193 bool MatchWrapper(SDValue N, X86ISelAddressMode &AM); 194 bool MatchAddress(SDValue N, X86ISelAddressMode &AM); 195 bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, 196 unsigned Depth); 197 bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM); 198 bool SelectAddr(SDValue Op, SDValue N, SDValue &Base, 199 SDValue &Scale, SDValue &Index, SDValue &Disp, 200 SDValue &Segment); 201 bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base, 202 SDValue &Scale, SDValue &Index, SDValue &Disp); 203 bool SelectTLSADDRAddr(SDValue Op, SDValue N, SDValue &Base, 204 SDValue &Scale, SDValue &Index, SDValue &Disp); 205 bool SelectScalarSSELoad(SDValue Op, SDValue Pred, 206 SDValue N, SDValue &Base, SDValue &Scale, 207 SDValue &Index, SDValue &Disp, 208 SDValue &Segment, 209 SDValue &InChain, SDValue &OutChain); 210 bool TryFoldLoad(SDValue P, SDValue N, 211 SDValue &Base, SDValue &Scale, 212 SDValue &Index, SDValue &Disp, 213 SDValue &Segment); 214 void PreprocessForRMW(); 215 void PreprocessForFPConvert(); 216 217 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for 218 /// inline asm expressions. 219 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, 220 char ConstraintCode, 221 std::vector<SDValue> &OutOps); 222 223 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI); 224 225 inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base, 226 SDValue &Scale, SDValue &Index, 227 SDValue &Disp, SDValue &Segment) { 228 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ? 229 CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) : 230 AM.Base.Reg; 231 Scale = getI8Imm(AM.Scale); 232 Index = AM.IndexReg; 233 // These are 32-bit even in 64-bit mode since RIP relative offset 234 // is 32-bit. 235 if (AM.GV) 236 Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp, 237 AM.SymbolFlags); 238 else if (AM.CP) 239 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, 240 AM.Align, AM.Disp, AM.SymbolFlags); 241 else if (AM.ES) 242 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags); 243 else if (AM.JT != -1) 244 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags); 245 else 246 Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32); 247 248 if (AM.Segment.getNode()) 249 Segment = AM.Segment; 250 else 251 Segment = CurDAG->getRegister(0, MVT::i32); 252 } 253 254 /// getI8Imm - Return a target constant with the specified value, of type 255 /// i8. 256 inline SDValue getI8Imm(unsigned Imm) { 257 return CurDAG->getTargetConstant(Imm, MVT::i8); 258 } 259 260 /// getI16Imm - Return a target constant with the specified value, of type 261 /// i16. 262 inline SDValue getI16Imm(unsigned Imm) { 263 return CurDAG->getTargetConstant(Imm, MVT::i16); 264 } 265 266 /// getI32Imm - Return a target constant with the specified value, of type 267 /// i32. 268 inline SDValue getI32Imm(unsigned Imm) { 269 return CurDAG->getTargetConstant(Imm, MVT::i32); 270 } 271 272 /// getGlobalBaseReg - Return an SDNode that returns the value of 273 /// the global base register. Output instructions required to 274 /// initialize the global base register, if necessary. 
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }

#ifndef NDEBUG
    unsigned Indent;
#endif
  };
}


bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
                                                 SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (U == Root)
    switch (U->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl %gs:0, %eax
      // leal i@NTPOFF(%eax), %eax
      // instead of
      // movl $i@NTPOFF, %eax
      // addl %gs:0, %eax
      // If the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }

  // Proceed to 'generic' cycle finder code.
  return SelectionDAGISel::IsLegalAndProfitableToFold(N, U, Root);
}

/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain
/// operand and move load below the TokenFactor. Replace store's chain operand
/// with load's chain result.
static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
                                 SDValue Store, SDValue TF) {
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == TF.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(TF.getOperand(i));
  SDValue NewTF = CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
  SDValue NewLoad = CurDAG->UpdateNodeOperands(Load, NewTF,
                                               Load.getOperand(1),
                                               Load.getOperand(2));
  CurDAG->UpdateNodeOperands(Store, NewLoad.getValue(1), Store.getOperand(1),
                             Store.getOperand(2), Store.getOperand(3));
}

/// isRMWLoad - Return true if N is a load that's part of an RMW sub-DAG. The
/// chain produced by the load must only be used by the store's chain operand,
/// otherwise this may produce a cycle in the DAG.
370/// 371static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address, 372 SDValue &Load) { 373 if (N.getOpcode() == ISD::BIT_CONVERT) 374 N = N.getOperand(0); 375 376 LoadSDNode *LD = dyn_cast<LoadSDNode>(N); 377 if (!LD || LD->isVolatile()) 378 return false; 379 if (LD->getAddressingMode() != ISD::UNINDEXED) 380 return false; 381 382 ISD::LoadExtType ExtType = LD->getExtensionType(); 383 if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD) 384 return false; 385 386 if (N.hasOneUse() && 387 LD->hasNUsesOfValue(1, 1) && 388 N.getOperand(1) == Address && 389 LD->isOperandOf(Chain.getNode())) { 390 Load = N; 391 return true; 392 } 393 return false; 394} 395 396/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain 397/// operand and move load below the call's chain operand. 398static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load, 399 SDValue Call, SDValue CallSeqStart) { 400 SmallVector<SDValue, 8> Ops; 401 SDValue Chain = CallSeqStart.getOperand(0); 402 if (Chain.getNode() == Load.getNode()) 403 Ops.push_back(Load.getOperand(0)); 404 else { 405 assert(Chain.getOpcode() == ISD::TokenFactor && 406 "Unexpected CallSeqStart chain operand"); 407 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) 408 if (Chain.getOperand(i).getNode() == Load.getNode()) 409 Ops.push_back(Load.getOperand(0)); 410 else 411 Ops.push_back(Chain.getOperand(i)); 412 SDValue NewChain = 413 CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(), 414 MVT::Other, &Ops[0], Ops.size()); 415 Ops.clear(); 416 Ops.push_back(NewChain); 417 } 418 for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i) 419 Ops.push_back(CallSeqStart.getOperand(i)); 420 CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size()); 421 CurDAG->UpdateNodeOperands(Load, Call.getOperand(0), 422 Load.getOperand(1), Load.getOperand(2)); 423 Ops.clear(); 424 Ops.push_back(SDValue(Load.getNode(), 1)); 425 for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i) 426 Ops.push_back(Call.getOperand(i)); 427 CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size()); 428} 429 430/// isCalleeLoad - Return true if call address is a load and it can be 431/// moved below CALLSEQ_START and the chains leading up to the call. 432/// Return the CALLSEQ_START by reference as a second output. 433static bool isCalleeLoad(SDValue Callee, SDValue &Chain) { 434 if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse()) 435 return false; 436 LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode()); 437 if (!LD || 438 LD->isVolatile() || 439 LD->getAddressingMode() != ISD::UNINDEXED || 440 LD->getExtensionType() != ISD::NON_EXTLOAD) 441 return false; 442 443 // Now let's find the callseq_start. 444 while (Chain.getOpcode() != ISD::CALLSEQ_START) { 445 if (!Chain.hasOneUse()) 446 return false; 447 Chain = Chain.getOperand(0); 448 } 449 450 if (Chain.getOperand(0).getNode() == Callee.getNode()) 451 return true; 452 if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor && 453 Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) && 454 Callee.getValue(1).hasOneUse()) 455 return true; 456 return false; 457} 458 459 460/// PreprocessForRMW - Preprocess the DAG to make instruction selection better. 461/// This is only run if not in -O0 mode. 462/// This allows the instruction selector to pick more read-modify-write 463/// instructions. 
/// This is a common case:
///
///     [Load chain]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /         |
/// [TokenFactor] [Op]
///     ^          ^
///     |          |
///      \        /
///       \      /
///       [Store]
///
/// The fact that the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it
/// to:
///
///     [Load chain]
///         ^
///         |
///    [TokenFactor]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///       |     \-
///       |       |
///       |     [Op]
///       |       ^
///       |       |
///        \     /
///         \   /
///        [Store]
void X86DAGToDAGISel::PreprocessForRMW() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {
    if (I->getOpcode() == X86ISD::CALL) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      SDValue Chain = I->getOperand(0);
      SDValue Load  = I->getOperand(1);
      if (!isCalleeLoad(Load, Chain))
        continue;
      MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDValue Chain = I->getOperand(0);

    if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
      continue;

    SDValue N1 = I->getOperand(1);
    SDValue N2 = I->getOperand(2);
    if ((N1.getValueType().isFloatingPoint() &&
         !N1.getValueType().isVector()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDValue Load;
    unsigned Opcode = N1.getNode()->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::VECTOR_SHUFFLE: {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      if (!RModW)
        RModW = isRMWLoad(N11, Chain, N2, Load);
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      SDValue N10 = N1.getOperand(0);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}


/// PreprocessForFPConvert - Walk over the dag lowering fpround and fpextend
/// nodes that target the FP stack to be store and load to the stack. This is
/// a gross hack. We would like to simply mark these as being illegal, but when
/// we do that, legalize produces these when it expands calls, then expands
/// these in the same legalize pass. We would like dag combine to be able to
/// hack on these between the call expansion and the node legalization. As
/// such this pass basically does "really late" legalization of these inline
/// with the X86 isel pass.
void X86DAGToDAGISel::PreprocessForFPConvert() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
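
    // Illustrative example (not from the original comments; assumes an
    // x87-to-SSE case): an FP_ROUND from f64 in x87 to f32 in SSE is rewritten
    // below as a truncating store plus a reload through a stack slot, roughly:
    //   fstps (slot)          ; truncstore f64 -> f32 from the FP stack
    //   movss (slot), %xmm0   ; reload the f32 into an SSE register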
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        NULL, 0, MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect() {
  const Function *F = MF->getFunction();
  OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);

  DEBUG(BB->dump());
  if (OptLevel != CodeGenOpt::None)
    PreprocessForRMW();

  // FIXME: This should only happen when not compiled with -O0.
  PreprocessForFPConvert();

  // Codegen the basic block.
#ifndef NDEBUG
  DEBUG(errs() << "===== Instruction selection begins:\n");
  Indent = 0;
#endif
  SelectRoot(*CurDAG);
#ifndef NDEBUG
  DEBUG(errs() << "===== Instruction selection ends:\n");
#endif

  CurDAG->RemoveDeadNodes();
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, DebugLoc::getUnknownLoc(),
            TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}


bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
                                              X86ISelAddressMode &AM) {
  assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
  SDValue Segment = N.getOperand(0);

  if (AM.Segment.getNode() == 0) {
    AM.Segment = Segment;
    return false;
  }

  return true;
}

bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf

  SDValue Address = N.getOperand(1);
  if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
      !MatchSegmentBaseAddress(Address, AM))
    return false;

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel) &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
      !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      int64_t Offset = AM.Disp + G->getOffset();
      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
      AM.GV = G->getGlobal();
      AM.Disp = Offset;
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      int64_t Offset = AM.Disp + CP->getOffset();
      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp = Offset;
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else {
      JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
  // mode, this results in a non-RIP-relative computation.
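  // Illustrative example (assumed; "myglobal" is a hypothetical symbol, not
  // from the original source): on X86-32 the symbol folds straight into the
  // displacement, e.g.
  //   movl myglobal+4(%ebx,%ecx,2), %eax
  // and under the 64-bit small/static configuration the same 32-bit absolute
  // displacement form remains legal.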
  if (!Subtarget->is64Bit() ||
      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
       TM.getRelocationModel() == Reloc::Static)) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else {
      JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    }
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base.Reg.getNode() == 0) {
    AM.Base.Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base.Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base.Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  bool is64Bit = Subtarget->is64Bit();
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      errs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  CodeModel::Model M = TM.getCodeModel();

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
      int64_t Val = AM.Disp + Cst->getSExtValue();
      if (X86::isOffsetSuitableForCodeModel(Val, M,
                                            AM.hasSymbolicDisplacement())) {
        AM.Disp = Val;
        return false;
      }
    }
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!is64Bit ||
        X86::isOffsetSuitableForCodeModel(AM.Disp + Val, M,
                                          AM.hasSymbolicDisplacement())) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::SegmentBaseAddress:
    if (!MatchSegmentBaseAddress(N, AM))
      return false;
    break;

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoad(N, AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
            isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
          if (!is64Bit ||
              X86::isOffsetSuitableForCodeModel(Disp, M,
                                                AM.hasSymbolicDisplacement()))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
      break;
    }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
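          // Illustrative example (assumed, not from the original comments):
          // for N = (x + 4) * 3 this path uses base = index = x with scale 2
          // and folds the constant, since (x + 4) * 3 == x + x*2 + 12, which
          // can select as
          //   leal 12(%eax,%eax,2), %ecx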
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
                                      CN->getZExtValue();
            if (!is64Bit ||
                X86::isOffsetSuitableForCodeModel(Disp, M,
                                                  AM.hasSymbolicDisplacement()))
              AM.Disp = Disp;
            else
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address, leaving the
    // index field unused, use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }
    int Cost = 0;
    SDValue RHS = N.getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base.Reg.getNode() &&
         !AM.Base.Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    if (Zero.getNode()->getNodeId() == -1 ||
        Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Zero.getNode());
      Zero.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    if (Neg.getNode()->getNodeId() == -1 ||
        Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Neg.getNode());
      Neg.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    return false;
  }

  case ISD::ADD: {
    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(N.getNode()->getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;
    if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM, Depth+1) &&
        !MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base.Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      AM.Base.Reg = N.getNode()->getOperand(0);
      AM.IndexReg = N.getNode()->getOperand(1);
      AM.Scale = 1;
      return false;
    }
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      uint64_t Offset = CN->getSExtValue();
      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          (!is64Bit ||
           X86::isOffsetSuitableForCodeModel(AM.Disp + Offset, M,
                                             AM.hasSymbolicDisplacement())) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += Offset;
        return false;
      }
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    SDValue Shift = N.getOperand(0);
    if (Shift.getNumOperands() != 2) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue X = Shift.getOperand(0);
    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Handle "(X >> (8-C1)) & C2" as "(X >> 8) & 0xff" if safe. This
    // allows us to convert the shift and and into an h-register extract and
    // a scaled index.
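    // Illustrative example (assumed, not from the original comments): with a
    // shift count of 6 and C2 == 0x3fc,
    //   (x >> 6) & 0x3fc  ==  ((x >> 8) & 0xff) << 2
    // so the masked value becomes an h-register extract (e.g. movzbl %ah)
    // used as an index register with a scale of 4.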
    if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
      unsigned ScaleLog = 8 - C1->getZExtValue();
      if (ScaleLog > 0 && ScaleLog < 4 &&
          C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
        SDValue Eight = CurDAG->getConstant(8, MVT::i8);
        SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
        SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                      X, Eight);
        SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
                                      Srl, Mask);
        SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
        SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                      And, ShlCount);

        // Insert the new nodes into the topological ordering.
        if (Eight.getNode()->getNodeId() == -1 ||
            Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Eight.getNode());
          Eight.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Mask.getNode()->getNodeId() == -1 ||
            Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Mask.getNode());
          Mask.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Srl.getNode()->getNodeId() == -1 ||
            Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
          CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
          Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
        }
        if (And.getNode()->getNodeId() == -1 ||
            And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), And.getNode());
          And.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        if (ShlCount.getNode()->getNodeId() == -1 ||
            ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
          ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        if (Shl.getNode()->getNodeId() == -1 ||
            Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), Shl.getNode());
          Shl.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        CurDAG->ReplaceAllUsesWith(N, Shl);
        AM.IndexReg = And;
        AM.Scale = (1 << ScaleLog);
        return false;
      }
    }

    // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
    if (Shift.getOpcode() != ISD::SHL) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
                                     NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       NewAND, SDValue(C1, 0));

    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  bool Done = false;
  if (AvoidDupAddrCompute && !N.hasOneUse()) {
    unsigned Opcode = N.getOpcode();
    if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex &&
        Opcode != X86ISD::Wrapper && Opcode != X86ISD::WrapperRIP) {
      // If we are able to fold N into addressing mode, then we'll allow it even
      // if N has multiple uses. In general, addressing computation is used as
      // addresses by all of its uses. But watch out for CopyToReg uses, that
      // means the address computation is liveout. It will be computed by a LEA
      // so we want to avoid computing the address twice.
      for (SDNode::use_iterator UI = N.getNode()->use_begin(),
             UE = N.getNode()->use_end(); UI != UE; ++UI) {
        if (UI->getOpcode() == ISD::CopyToReg) {
          MatchAddressBase(N, AM);
          Done = true;
          break;
        }
      }
    }
  }

  if (!Done && MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.getNode())
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.getNode()) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
    return true;
  }
  return false;
}


/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
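  // (MatchAddress only fills AM.Segment when it is still empty, so seeding it
  // with the zero register below blocks any segment from being matched; the
  // assert then verifies it was left untouched before the placeholder is
  // swapped back out.)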
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // to use a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue Op, SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp) {
  assert(Op.getOpcode() == X86ISD::TLSADDR);
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (ISD::isNON_EXTLoad(N.getNode()) &&
      N.hasOneUse() &&
      IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
  return false;
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
1447/// 1448SDNode *X86DAGToDAGISel::getGlobalBaseReg() { 1449 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); 1450 return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode(); 1451} 1452 1453static SDNode *FindCallStartFromCall(SDNode *Node) { 1454 if (Node->getOpcode() == ISD::CALLSEQ_START) return Node; 1455 assert(Node->getOperand(0).getValueType() == MVT::Other && 1456 "Node doesn't have a token chain argument!"); 1457 return FindCallStartFromCall(Node->getOperand(0).getNode()); 1458} 1459 1460SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { 1461 SDValue Chain = Node->getOperand(0); 1462 SDValue In1 = Node->getOperand(1); 1463 SDValue In2L = Node->getOperand(2); 1464 SDValue In2H = Node->getOperand(3); 1465 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 1466 if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) 1467 return NULL; 1468 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1469 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); 1470 const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain}; 1471 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), 1472 MVT::i32, MVT::i32, MVT::Other, Ops, 1473 array_lengthof(Ops)); 1474 cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1); 1475 return ResNode; 1476} 1477 1478SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) { 1479 if (Node->hasAnyUseOfValue(0)) 1480 return 0; 1481 1482 // Optimize common patterns for __sync_add_and_fetch and 1483 // __sync_sub_and_fetch where the result is not used. This allows us 1484 // to use "lock" version of add, sub, inc, dec instructions. 1485 // FIXME: Do not use special instructions but instead add the "lock" 1486 // prefix to the target node somehow. The extra information will then be 1487 // transferred to machine instruction and it denotes the prefix. 
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Ptr, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }

  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (Predicate_i16immSExt8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (Predicate_i16immSExt8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (Predicate_i32immSExt8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (Predicate_i32immSExt8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (Predicate_i64immSExt8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (Predicate_i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (Predicate_i64immSExt8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (Predicate_i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }

  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetInstrInfo::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2,
                                  dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}

/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA: case X86::JAE: case X86::JB: case X86::JBE:
      case X86::JE: case X86::JNE: case X86::JP: case X86::JNP:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
SDNode *X86DAGToDAGISel::Select(SDValue N) {
  SDNode *Node = N.getNode();
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

#ifndef NDEBUG
  DEBUG({
    errs() << std::string(Indent, ' ') << "Selecting: ";
    Node->dump(CurDAG);
    errs() << '\n';
  });
  Indent += 2;
#endif

  if (Node->isMachineOpcode()) {
#ifndef NDEBUG
    DEBUG({
      errs() << std::string(Indent-2, ' ') << "== ";
      Node->dump(CurDAG);
      errs() << '\n';
    });
    Indent -= 2;
#endif
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative, so if the load didn't fold on the RHS,
    // try the LHS and swap the operands.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }
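    // MUL/IMUL implicitly read LoReg (AL/AX/EAX/RAX) as one factor and write
    // the double-width product to HiReg:LoReg, so N0 is copied into LoReg
    // first and the glue threads that copy into the multiply.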
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the low half of the result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DEBUG({
        errs() << std::string(Indent-2, ' ') << "=> ";
        Result.getNode()->dump(CurDAG);
        errs() << '\n';
      });
#endif
    }
    // Copy the high half of the result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                                Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                MVT::i8, Result);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DEBUG({
        errs() << std::string(Indent-2, ' ') << "=> ";
        Result.getNode()->dump(CurDAG);
        errs() << '\n';
      });
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
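      // i8 is special: DIV/IDIV treat AX as the whole 16-bit dividend, with
      // the quotient in AL and the remainder in AH, so there is no separate
      // high register to clear (hence ClrOpcode stays 0 above).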
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; HiReg = X86::RDX;
      ClrOpcode  = ~0U; // NOT USED.
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX16rm8, dl, MVT::i16,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX16rr8, dl, MVT::i16, N0), 0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Flag, InFlag), 0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode;

        if (NVT.getSimpleVT() == MVT::i64) {
          ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, MVT::i32),
                            0);
          // We just did a 32-bit clear, insert it into a 64-bit register to
          // clear the whole 64-bit reg.
          SDValue Undef =
            SDValue(CurDAG->getMachineNode(TargetInstrInfo::IMPLICIT_DEF,
                                           dl, MVT::i64), 0);
          SDValue SubRegNo =
            CurDAG->getTargetConstant(X86::SUBREG_32BIT, MVT::i32);
          ClrNode =
            SDValue(CurDAG->getMachineNode(TargetInstrInfo::INSERT_SUBREG, dl,
                                           MVT::i64, Undef, ClrNode, SubRegNo),
                    0);
        } else {
          ClrNode = SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, HiReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the division (low) result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DEBUG({
        errs() << std::string(Indent-2, ' ') << "=> ";
        Result.getNode()->dump(CurDAG);
        errs() << '\n';
      });
#endif
    }
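    // Each CopyFromReg consumes the current glue and produces a fresh one
    // (Result.getValue(2)), which keeps both register reads pinned directly
    // after the divide instruction.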
    // Copy the remainder (high) result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                                Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)),
                         0);
        // Then truncate it down to i8.
        Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                MVT::i8, Result);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DEBUG({
        errs() << std::string(Indent-2, ' ') << "=> ";
        Result.getNode()->dump(CurDAG);
        errs() << '\n';
      });
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }

  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8"
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          TargetRegisterClass *TRC = 0;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }
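      // The remaining cases apply the same trick to other subregisters; the
      // saving is in the immediate, e.g. the four-byte imm32 of
      // "testl %eax, $2048" becomes the one-byte imm8 of "testb %ah, $8".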
      // For example, "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        TargetRegisterClass *TRC = 0;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT_HI, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. No special NOREX tricks are needed since there's
        // only one GPR operand!
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }

      // For example, "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_16BIT, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_32BIT, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
      }
    }
    break;
  }
  }

  SDNode *ResNode = SelectCode(N);

#ifndef NDEBUG
  DEBUG({
    errs() << std::string(Indent-2, ' ') << "=> ";
    if (ResNode == NULL || ResNode == N.getNode())
      N.getNode()->dump(CurDAG);
    else
      ResNode->dump(CurDAG);
    errs() << '\n';
  });
  Indent -= 2;
#endif

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o': // offsetable        ??
  case 'v': // not offsetable    ??
  default: return true;
  case 'm': // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}
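// A minimal sketch of how this factory is presumably wired up, mirroring the
// shape of X86TargetMachine::addInstSelector (the body shown is an assumption
// for illustration, not part of this file):
//
//   bool X86TargetMachine::addInstSelector(PassManagerBase &PM,
//                                          CodeGenOpt::Level OptLevel) {
//     // Install an instruction selector pass.
//     PM.add(createX86ISelDag(*this, OptLevel));
//     return false;
//   }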