X86ISelDAGToDAG.cpp revision 8433df36fb9566a00e643a6cb8f5e77af453ea81
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Streams.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

#include "llvm/Support/CommandLine.h"
static cl::opt<bool> AvoidDupAddrCompute("x86-avoid-dup-address", cl::Hidden);

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
// Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    struct {            // This is really a union, discriminated by BaseType!
      SDValue Reg;
      int FrameIndex;
    } Base;

    bool isRIPRel;      // RIP as base?
    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    GlobalValue *GV;
    Constant *CP;
    const char *ES;
    int JT;
    unsigned Align;     // CP alignment.
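
    // For illustration: taken together these fields model the general x86
    // memory operand
    //   Segment:[Base + Scale*IndexReg + Disp]
    // e.g. "movl %gs:4(%ebx,%ecx,8), %eax" corresponds to Base.Reg = EBX,
    // Scale = 8, IndexReg = ECX, Disp = 4, Segment = GS.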

    X86ISelAddressMode()
      : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1;
    }

    void dump() {
      cerr << "X86ISelAddressMode " << this << "\n";
      cerr << "Base.Reg ";
      if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
      else cerr << "nul";
      cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
      cerr << "isRIPRel " << isRIPRel << " Scale" << Scale << "\n";
      cerr << "IndexReg ";
      if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
      else cerr << "nul";
      cerr << " Disp " << Disp << "\n";
      cerr << "GV "; if (GV) GV->dump();
      else cerr << "nul";
      cerr << " CP "; if (CP) CP->dump();
      else cerr << "nul";
      cerr << "\n";
      cerr << "ES "; if (ES) cerr << ES; else cerr << "nul";
      cerr << " JT" << JT << " Align" << Align << "\n";
    }
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
    /// TM - Keep a reference to X86TargetMachine.
    ///
    X86TargetMachine &TM;

    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// CurBB - Current BB being isel'd.
    ///
    MachineBasicBlock *CurBB;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
      : SelectionDAGISel(tm, fast),
        TM(tm), X86Lowering(*TM.getTargetLowering()),
        Subtarget(&TM.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    /// InstructionSelect - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelect();

    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

    virtual
    bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;

    // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDValue N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

    bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
                      unsigned Depth = 0);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
                             SDValue N, SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &InChain, SDValue &OutChain);
    bool TryFoldLoad(SDValue P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);
    void PreprocessForRMW();
    void PreprocessForFPConvert();

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
        AM.Base.Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI16Imm - Return a target constant with the specified value, of type
    /// i16.
    inline SDValue getI16Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i16);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

#ifndef NDEBUG
    unsigned Indent;
#endif
  };
}

/// findFlagUse - Return use of MVT::Flag value produced by the specified
/// SDNode.
///
static SDNode *findFlagUse(SDNode *N) {
  unsigned FlagResNo = N->getNumValues()-1;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDUse &Use = I.getUse();
    if (Use.getResNo() == FlagResNo)
      return Use.getUser();
  }
  return NULL;
}

/// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
/// This function recursively traverses up the operand chain, ignoring
/// certain nodes.
static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                          SDNode *Root,
                          SmallPtrSet<SDNode*, 16> &Visited) {
  if (Use->getNodeId() < Def->getNodeId() ||
      !Visited.insert(Use))
    return false;

  for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
    SDNode *N = Use->getOperand(i).getNode();
    if (N == Def) {
      if (Use == ImmedUse || Use == Root)
        continue;  // We are not looking for immediate use.
      assert(N != Root);
      return true;
    }

    // Traverse up the operand chain.
    if (findNonImmUse(N, Def, ImmedUse, Root, Visited))
      return true;
  }
  return false;
}

/// isNonImmUse - Start searching from Root up the DAG to check if Def can
/// be reached. Return true if that's the case. However, ignore direct uses
/// by ImmedUse (which would be U in the example illustrated in
/// IsLegalAndProfitableToFold) and by Root (which can happen in the store
/// case).
/// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically since we only fold loads which
/// have one non-chain use, we only need to watch out for load/op/store
/// and load/op/cmp cases where the root (store / cmp) may reach the load via
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse) {
  SmallPtrSet<SDNode*, 16> Visited;
  return findNonImmUse(Root, Def, ImmedUse, Root, Visited);
}


bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
                                                 SDNode *Root) const {
  if (Fast) return false;

  if (U == Root)
    switch (U->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl %gs:0, %eax
      //   leal i@NTPOFF(%eax), %eax
      // instead of
      //   movl $i@NTPOFF, %eax
      //   addl %gs:0, %eax
      // If the block also has an access to a second TLS address this will
      // save a load.
      // FIXME: This is probably also true for non TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }

  // If Root use can somehow reach N through a path that doesn't contain
  // U then folding N would create a cycle. e.g.
  // In the following diagram, Root can reach N through X. If N is folded
  // into Root, then X is both a predecessor and a successor of U.
  //
  //         [N*]          //
  //        ^    ^         //
  //       /      \        //
  //     [U*]    [X]?      //
  //       ^      ^        //
  //        \    /         //
  //         \  /          //
  //       [Root*]         //
  //
  //  * indicates nodes to be folded together.
  //
  // If Root produces a flag, then it gets (even more) interesting. Since it
  // will be "glued" together with its flag use in the scheduler, we need to
  // check if it might reach N.
  //
  //          [N*]         //
  //         ^    ^        //
  //        /      \       //
  //      [U*]    [X]?     //
  //        ^       ^      //
  //         \       \     //
  //          \      |     //
  //         [Root*] |     //
  //          ^      |     //
  //          f      |     //
  //          |      /     //
  //         [Y]    /      //
  //          ^    /       //
  //          f   /        //
  //          |  /         //
  //         [FU]          //
  //
  // If FU (flag use) indirectly reaches N (the load), and Root folds N
  // (call it Fold), then X is a predecessor of FU and a successor of
  // Fold. But since Fold and FU are flagged together, this will create
  // a cycle in the scheduling graph.

  MVT VT = Root->getValueType(Root->getNumValues()-1);
  while (VT == MVT::Flag) {
    SDNode *FU = findFlagUse(Root);
    if (FU == NULL)
      break;
    Root = FU;
    VT = Root->getValueType(Root->getNumValues()-1);
  }

  return !isNonImmUse(Root, N, U);
}

/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain
/// operand and move load below the TokenFactor. Replace store's chain operand
/// with load's chain result.
static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
                                 SDValue Store, SDValue TF) {
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == TF.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(TF.getOperand(i));
  CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
  CurDAG->UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
                             Store.getOperand(2), Store.getOperand(3));
}

/// isRMWLoad - Return true if N is a load that's part of an RMW sub-DAG.
///
static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
                      SDValue &Load) {
  if (N.getOpcode() == ISD::BIT_CONVERT)
    N = N.getOperand(0);

  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD || LD->isVolatile())
    return false;
  if (LD->getAddressingMode() != ISD::UNINDEXED)
    return false;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
    return false;

  if (N.hasOneUse() &&
      N.getOperand(1) == Address &&
      N.getNode()->isOperandOf(Chain.getNode())) {
    Load = N;
    return true;
  }
  return false;
}

/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
/// operand and move load below the call's chain operand.
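/// For illustration, the intent is to rewrite a chain of the form
///   Load -> CALLSEQ_START -> ... -> CALL(Load)
/// so that the load of the call address hangs directly below the call,
/// allowing it to be folded into the call (e.g. "call *8(%ebx)" rather than
/// a separate move followed by "call *%eax").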
static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
                                  SDValue Call, SDValue CallSeqStart) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = CallSeqStart.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected CallSeqStart chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i)
    Ops.push_back(CallSeqStart.getOperand(i));
  CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
}

/// isCalleeLoad - Return true if the call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()))
    return true;
  return false;
}


/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
///     [Load chain]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /        |
/// [TokenFactor] [Op]
///     ^          ^
///     |          |
///      \        /
///       \      /
///       [Store]
///
/// The fact that the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it
/// to:
///
///     [Load chain]
///         ^
///         |
///    [TokenFactor]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///       |     \-
///       |      |
///       |    [Op]
///       |      ^
///       |      |
///       \     /
///        \   /
///       [Store]
void X86DAGToDAGISel::PreprocessForRMW() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {
    if (I->getOpcode() == X86ISD::CALL) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      SDValue Chain = I->getOperand(0);
      SDValue Load  = I->getOperand(1);
      if (!isCalleeLoad(Load, Chain))
        continue;
      MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDValue Chain = I->getOperand(0);

    if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
      continue;

    SDValue N1 = I->getOperand(1);
    SDValue N2 = I->getOperand(2);
    if ((N1.getValueType().isFloatingPoint() &&
         !N1.getValueType().isVector()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDValue Load;
    unsigned Opcode = N1.getNode()->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::VECTOR_SHUFFLE: {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      if (!RModW)
        RModW = isRMWLoad(N11, Chain, N2, Load);
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      SDValue N10 = N1.getOperand(0);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}


/// PreprocessForFPConvert - Walk over the dag, lowering fpround and fpextend
/// nodes that target the FP stack into store/load pairs through the stack.
/// This is a gross hack. We would like to simply mark these as being illegal,
/// but when we do that, legalize produces these when it expands calls, then
/// expands these in the same legalize pass. We would like dag combine to be
/// able to hack on these between the call expansion and the node
/// legalization. As such this pass basically does "really late" legalization
/// of these inline with the X86 isel pass.
void X86DAGToDAGISel::PreprocessForFPConvert() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    MVT SrcVT = N->getOperand(0).getValueType();
    MVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
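    // Illustrative example: converting an x87 (FPStack) f64 result to an SSE
    // f32 value picks MemVT = f32 below, so we emit a truncating f32 store to
    // a stack slot and reload it with an SSE load; the rounding happens as
    // part of the memory round-trip.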
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        NULL, 0, MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect() {
  CurBB = BB;  // BB can change as result of isel.
  const Function *F = CurDAG->getMachineFunction().getFunction();
  OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);

  DEBUG(BB->dump());
  if (!Fast)
    PreprocessForRMW();

  // FIXME: This should only happen when not -fast.
  PreprocessForFPConvert();

  // Codegen the basic block.
#ifndef NDEBUG
  DOUT << "===== Instruction selection begins:\n";
  Indent = 0;
#endif
  SelectRoot(*CurDAG);
#ifndef NDEBUG
  DOUT << "===== Instruction selection ends:\n";
#endif

  CurDAG->RemoveDeadNodes();
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, DebugLoc::getUnknownLoc(),
            TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}


bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
                                              X86ISelAddressMode &AM) {
  assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
  SDValue Segment = N.getOperand(0);

  if (AM.Segment.getNode() == 0) {
    AM.Segment = Segment;
    return false;
  }

  return true;
}

bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
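  // (In other words: since "movl %gs:0, %eax" yields the TLS block's own
  // address, an address of the form (load gs:0) + offset can be encoded
  // directly as %gs:offset.)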
  // For more information see http://people.redhat.com/drepper/tls.pdf

  SDValue Address = N.getOperand(1);
  if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
      !MatchSegmentBaseAddress(Address, AM))
    return false;

  return true;
}

bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  bool is64Bit = Subtarget->is64Bit();
  DOUT << "Wrapper: 64bit " << is64Bit;
  DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";

  // Under X86-64 non-small code model, GV (and friends) are 64-bits.
  if (is64Bit && (TM.getCodeModel() != CodeModel::Small))
    return true;

  // Base and index reg must be 0 in order to use rip as base.
  bool canUsePICRel = !AM.Base.Reg.getNode() && !AM.IndexReg.getNode();
  if (is64Bit && !canUsePICRel && TM.symbolicAddressesAreRIPRel())
    return true;

  if (AM.hasSymbolicDisplacement())
    return true;
  // If the value is available in a register and both base and index
  // components have already been picked, we can't fit the register into the
  // addressing mode. Duplicate the GlobalAddress or ConstantPool as a
  // displacement instead.

  SDValue N0 = N.getOperand(0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
    uint64_t Offset = G->getOffset();
    if (!is64Bit || isInt32(AM.Disp + Offset)) {
      GlobalValue *GV = G->getGlobal();
      bool isRIPRel = TM.symbolicAddressesAreRIPRel();
      if (N0.getOpcode() == llvm::ISD::TargetGlobalTLSAddress) {
        TLSModel::Model model =
          getTLSModel(GV, TM.getRelocationModel());
        if (is64Bit && model == TLSModel::InitialExec)
          isRIPRel = true;
      }
      AM.GV = GV;
      AM.Disp += Offset;
      AM.isRIPRel = isRIPRel;
      return false;
    }
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
    uint64_t Offset = CP->getOffset();
    if (!is64Bit || isInt32(AM.Disp + Offset)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += Offset;
      AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
      return false;
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
    AM.ES = S->getSymbol();
    AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
    return false;
  } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
    AM.JT = J->getIndex();
    AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
                                   unsigned Depth) {
  bool is64Bit = Subtarget->is64Bit();
  DebugLoc dl = N.getDebugLoc();
  DOUT << "MatchAddress: "; DEBUG(AM.dump());
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // RIP relative addressing: %rip + 32-bit displacement!
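  // (e.g. "movl foo(%rip), %eax"; once RIP is the base, neither an index
  // register nor a further symbolic displacement can be added, only a
  // constant offset.)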
  if (AM.isRIPRel) {
    if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
      uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
      if (!is64Bit || isInt32(AM.Disp + Val)) {
        AM.Disp += Val;
        return false;
      }
    }
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!is64Bit || isInt32(AM.Disp + Val)) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::SegmentBaseAddress:
    if (!MatchSegmentBaseAddress(N, AM))
      return false;
    break;

  case X86ISD::Wrapper:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoad(N, AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1 || AM.isRIPRel)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
            isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
          if (!is64Bit || isInt32(Disp))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
      break;
    }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0 &&
        !AM.isRIPRel) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
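          // Illustrative example: (add X, 7) * 5 becomes Base = Index = X,
          // Scale = 4, Disp += 35, i.e. the "leal 35(X,X,4)" form, since
          // X + 4*X + 35 == 5*(X + 7).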
          if (MulVal.getNode()->getOpcode() == ISD::ADD &&
              MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
                                      CN->getZExtValue();
            if (!is64Bit || isInt32(Disp))
              AM.Disp = Disp;
            else
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::ADD: {
    X86ISelAddressMode Backup = AM;
    if (!MatchAddress(N.getNode()->getOperand(0), AM, Depth+1) &&
        !MatchAddress(N.getNode()->getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;
    if (!MatchAddress(N.getNode()->getOperand(1), AM, Depth+1) &&
        !MatchAddress(N.getNode()->getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base.Reg.getNode() &&
        !AM.IndexReg.getNode() &&
        !AM.isRIPRel) {
      AM.Base.Reg = N.getNode()->getOperand(0);
      AM.IndexReg = N.getNode()->getOperand(1);
      AM.Scale = 1;
      return false;
    }
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      uint64_t Offset = CN->getSExtValue();
      // Start with the LHS as an addr mode.
      if (!MatchAddress(N.getOperand(0), AM, Depth+1) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          (!is64Bit || isInt32(AM.Disp + Offset)) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += Offset;
        return false;
      }
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SHL) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    // Not when RIP is used as the base.
    if (AM.isRIPRel) break;

    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // the isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
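    // Illustrative example: (x << 2) & 0x3FC becomes (x & 0xFF) << 2, so the
    // shift is absorbed into the addressing mode as Scale = 4 and only the
    // cheap AND remains.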
    SDValue X = Shift.getOperand(0);
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
                                     NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       NewAND, SDValue(C1, 0));

    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0 && !AM.isRIPRel) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  bool Done = false;
  if (AvoidDupAddrCompute && !N.hasOneUse()) {
    unsigned Opcode = N.getOpcode();
    if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex &&
        Opcode != X86ISD::Wrapper) {
      // If we are able to fold N into the addressing mode, then we'll allow
      // it even if N has multiple uses. In general, an address computation is
      // used as an address by all of its uses. But watch out for CopyToReg
      // uses: that means the address computation is live-out. It will be
      // computed by a LEA, so we want to avoid computing the address twice.
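      // Sketch of the concern: if "t = add %base, %off" feeds both a load
      // and a CopyToReg, matching the add into the load's address operand
      // would duplicate the computation, once folded into the load and once
      // in the LEA that materializes the live-out value. Falling back to
      // MatchAddressBase below keeps a single shared value.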
      for (SDNode::use_iterator UI = N.getNode()->use_begin(),
             UE = N.getNode()->use_end(); UI != UE; ++UI) {
        if (UI->getOpcode() == ISD::CopyToReg) {
          MatchAddressBase(N, AM);
          Done = true;
          break;
        }
      }
    }
  }

  if (!Done && MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.getNode())
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.getNode()) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
    return true;
  }
  return false;
}


/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't
  // support segments.
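  // (LEA only computes the effective address, e.g.
  // "leal 4(%ebx,%ecx,2), %eax"; since it never accesses memory, a segment
  // override would have no effect.)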
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  MVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg or a
  // simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // into a LEA. This is determined with some experimentation but is by no
  // means optimal (especially for code size consideration). LEA is nice
  // because of its three-address nature. Tweak the cost function again when
  // we can run convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  if (Complexity > 2) {
    SDValue Segment;
    getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
    return true;
  }
  return false;
}

bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (ISD::isNON_EXTLoad(N.getNode()) &&
      N.hasOneUse() &&
      IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
  return false;
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
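/// (For example, on x86-32 PIC the global base register holds the PIC base,
/// such as the address of the GOT on ELF targets; the TargetInstrInfo call
/// below ensures it is materialized once per function.)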
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  MachineFunction *MF = CurBB->getParent();
  unsigned GlobalBaseReg = TM.getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).getNode());
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  SDValue LSI = Node->getOperand(4);  // MemOperand
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, LSI,
                          Chain };
  return CurDAG->getTargetNode(Opc, Node->getDebugLoc(),
                               MVT::i32, MVT::i32, MVT::Other, Ops,
                               array_lengthof(Ops));
}

SDNode *X86DAGToDAGISel::Select(SDValue N) {
  SDNode *Node = N.getNode();
  MVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

#ifndef NDEBUG
  DOUT << std::string(Indent, ' ') << "Selecting: ";
  DEBUG(Node->dump(CurDAG));
  DOUT << "\n";
  Indent += 2;
#endif

  if (Node->isMachineOpcode()) {
#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "== ";
    DEBUG(Node->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                              array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the low half of the result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }
    // Copy the high half of the result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
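        // (With a REX prefix, the encodings that normally mean AH/BH/CH/DH
        // select SPL/BPL/SIL/DIL instead, so AH is unencodable there.)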
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
                                               Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }

    unsigned LoReg, HiReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
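      // Illustrative assembly for an unsigned i8 divide down this path:
      //   movzbw %cl, %ax   (zero-extend the dividend into AX)
      //   divb %bl          (AL = quotient, AH = remainder)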
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, MVT::i16,
                                        MVT::Other, Ops,
                                        array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, dl, MVT::i16, N0), 0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getTargetNode(SExtOpcode, dl, MVT::Flag, InFlag), 0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, dl, NVT),
                                  0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, HiReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                              array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the division (low) result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }
    // Copy the remainder (high) result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
                                               Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)),
                         0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }

  case ISD::DECLARE: {
    // Handle DECLARE nodes here because the second operand may have been
    // wrapped in X86ISD::Wrapper.
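    // (A DECLARE node pairs a stack slot with its debug descriptor; the code
    // below unwraps Wrapper(TargetGlobalAddress) to recover the descriptor
    // global.)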
    SDValue Chain = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);

    // FIXME: We need to handle this for VLAs.
    if (!FINode) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }

    if (N2.getOpcode() == ISD::ADD &&
        N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
      N2 = N2.getOperand(1);

    // If N2 is not Wrapper(descriptor) then the llvm.declare is mangled
    // somehow, just ignore it.
    if (N2.getOpcode() != X86ISD::Wrapper) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }
    GlobalAddressSDNode *GVNode =
      dyn_cast<GlobalAddressSDNode>(N2.getOperand(0));
    if (GVNode == 0) {
      ReplaceUses(N.getValue(0), Chain);
      return NULL;
    }
    SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
                                               TLI.getPointerTy());
    SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GVNode->getGlobal(),
                                                  TLI.getPointerTy());
    SDValue Ops[] = { Tmp1, Tmp2, Chain };
    return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
                                 MVT::Other, Ops,
                                 array_lengthof(Ops));
  }
  }

  SDNode *ResNode = SelectCode(N);

#ifndef NDEBUG
  DOUT << std::string(Indent-2, ' ') << "=> ";
  if (ResNode == NULL || ResNode == N.getNode())
    DEBUG(N.getNode()->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DOUT << "\n";
  Indent -= 2;
#endif

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
  return new X86DAGToDAGISel(TM, Fast);
}