X86ISelDAGToDAG.cpp revision dddcd78e24babb4ca6b35d99abe40bdedde71fab
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;             // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }
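
    // Illustrative mapping (editorial sketch, not in the original source): a
    // fully populated mode corresponds to one x86 memory operand, e.g.
    //   movl 4(%ebx,%ecx,8), %eax
    // would be represented as Base_Reg = %ebx, IndexReg = %ecx, Scale = 8,
    // Disp = 4, with GV/CP/ES/JT/BlockAddr unset and Segment empty.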
    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }
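
    // For illustration (editorial note, not in the original source):
    // i64immSExt32 accepts 0xFFFFFFFF80000000 (-2147483648, representable as
    // a sign-extended 32-bit field) but rejects 0x0000000080000000
    // (+2147483648, which flips sign when truncated to 32 bits).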
    // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
    SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      else if (AM.BlockAddr)
        Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
                                       true, AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }
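
    // Example (editorial sketch, not in the original source): for a simple
    // stack access such as "movl 8(%esp), %eax", getAddressOperands produces
    // the standard five-operand memory reference:
    //   Base = %esp (or a frame index before elimination), Scale = 1,
    //   Index = noreg, Disp = 8, Segment = noreg.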
    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl %gs:0, %eax
      //   leal i@NTPOFF(%eax), %eax
      // instead of
      //   movl $i@NTPOFF, %eax
      //   addl %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}
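
// Note (illustrative, not in the original source): the hasOneUse check above
// matters because folding a multi-use load into one user would not eliminate
// the load; the other users would still need its value, so the memory access
// would effectively be duplicated.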
/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}

/// isCalleeLoad - Return true if the call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         N->getOpcode() == X86ISD::TC_RETURN)) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to a store
    // and load to the stack.  This is a gross hack.  We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass.  We would like dag combine to be able to hack on these between
    // the call expansion and the node legalization.  As such this pass
    // basically does "really late" legalization of these inline with the
    // X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
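
// Illustration of the FP_ROUND lowering above (editorial sketch, not in the
// original source): an f64 -> f32 truncation whose source lives on the x87
// stack and whose result is needed in an SSE register becomes, roughly,
//   fstps -4(%esp)          ; truncstore of f32 to the stack temporary
//   movss -4(%esp), %xmm0   ; (ext)load from the same slot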

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::WINCALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetELF())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}
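
// For illustration (editorial sketch, not in the original source): in LLVM
// IR of this era the pattern recognized above appears as a load from a null
// pointer in address space 256 or 257, e.g.
//   %base = load i32 addrspace(256)* null
// which selects to "movl %gs:0, %reg" on x86-32 ELF.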

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode.  These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses.  We check this before checking
  // direct folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel) &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
      !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -static -mcmodel=small mode.  In 64-bit
  // mode, this results in a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
       TM.getRelocationModel() == Reloc::Static)) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }
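
  // Why this helps (illustrative note, not in the original source): an
  // index-only SIB form like lea (,%reg,2), %dst has no base register, and
  // base-less SIB encodings require a full 32-bit displacement; the
  // (%reg,%reg) form encodes with a one-byte displacement or none at all.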

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This
// allows us to convert the shift and and into an h-register extract and
// a scaled index. Returns false if the simplification is performed.
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering.
  if (Eight.getNode()->getNodeId() == -1 ||
      Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
    DAG.RepositionNode(X.getNode(), Eight.getNode());
    Eight.getNode()->setNodeId(X.getNode()->getNodeId());
  }
  if (NewMask.getNode()->getNodeId() == -1 ||
      NewMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
    DAG.RepositionNode(X.getNode(), NewMask.getNode());
    NewMask.getNode()->setNodeId(X.getNode()->getNodeId());
  }
  if (Srl.getNode()->getNodeId() == -1 ||
      Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
    DAG.RepositionNode(Shift.getNode(), Srl.getNode());
    Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
  }
  if (And.getNode()->getNodeId() == -1 ||
      And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), And.getNode());
    And.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (ShlCount.getNode()->getNodeId() == -1 ||
      ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
    DAG.RepositionNode(X.getNode(), ShlCount.getNode());
    ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (Shl.getNode()->getNodeId() == -1 ||
      Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), Shl.getNode());
    Shl.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
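
// Worked example for the transform above (editorial sketch, not in the
// original source): with a shift of 5, ScaleLog = 3 and the required mask is
// 0xff << 3 = 0x7f8, so
//   (x >> 5) & 0x7f8  ==>  ((x >> 8) & 0xff) << 3
// i.e. an h-register style byte extract used as an index with Scale = 8.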

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering.
  if (NewMask.getNode()->getNodeId() == -1 ||
      NewMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
    DAG.RepositionNode(X.getNode(), NewMask.getNode());
    NewMask.getNode()->setNodeId(X.getNode()->getNodeId());
  }
  if (NewAnd.getNode()->getNodeId() == -1 ||
      NewAnd.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
    DAG.RepositionNode(Shift.getNode(), NewAnd.getNode());
    NewAnd.getNode()->setNodeId(Shift.getNode()->getNodeId());
  }
  if (NewShift.getNode()->getNodeId() == -1 ||
      NewShift.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewShift.getNode());
    NewShift.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
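
// Worked example for FoldMaskedShiftToScaledMask (editorial sketch, not in
// the original source): with C1 = 1 and C2 = 0xff00,
//   (x << 1) & 0xff00  ==>  (x & 0x7f80) << 1
// and the shl by 1 is then absorbed into the addressing mode as Scale = 2.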

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = CountLeadingZeros_64(Mask);
  unsigned MaskTZ = CountTrailingZeros_64(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, MaskedHighBits, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    if (NewX.getNode()->getNodeId() == -1 ||
        NewX.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      DAG.RepositionNode(N.getNode(), NewX.getNode());
      NewX.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    X = NewX;
  }
  DebugLoc DL = N.getDebugLoc();
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
  if (NewSRLAmt.getNode()->getNodeId() == -1 ||
      NewSRLAmt.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewSRLAmt.getNode());
    NewSRLAmt.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (NewSRL.getNode()->getNodeId() == -1 ||
      NewSRL.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewSRL.getNode());
    NewSRL.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (NewSHLAmt.getNode()->getNodeId() == -1 ||
      NewSHLAmt.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewSHLAmt.getNode());
    NewSHLAmt.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (NewSHL.getNode()->getNodeId() == -1 ||
      NewSHL.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewSHL.getNode());
    NewSHL.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}
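
// Worked example for FoldMaskAndShiftToScale (editorial sketch, not in the
// original source): for (x >> 6) & 0x3c the mask has two trailing zeros, so
// AMShiftAmt = 2 and the pattern becomes ((x >> 8) << 2), i.e. IndexReg =
// (x >> 8) with Scale = 4 -- valid only because the known-zero check above
// proves the bits the mask would have cleared are already zero.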

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it.  Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
      break;
    }

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }
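
  // Illustration for the multiply cases below (editorial note, not in the
  // original source): x*5 becomes base + scaled index in a single LEA,
  //   leal (%eax,%eax,4), %eax
  // matching AM.Base_Reg = AM.IndexReg = x and AM.Scale = 4.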
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address (leaving the
    // index field unused), use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction; however,
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    if (Zero.getNode()->getNodeId() == -1 ||
        Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Zero.getNode());
      Zero.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    if (Neg.getNode()->getNodeId() == -1 ||
        Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Neg.getNode());
      Neg.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, C2->getZExtValue(),
                                   Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, C2->getZExtValue(), Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, C2->getZExtValue(),
                                     Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode it
/// can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched.  It
/// is always a load, store, atomic node, or null.  It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes is all the nodes that have an "addr:$ptr" operand
      // but are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
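
// Usage sketch for SelectScalarSSELoad below (editorial note, not in the
// original source): it lets a pattern like
//   (v4f32 (scalar_to_vector (f32 (load addr))))
// fold the load directly into a scalar SSE operation, provided the load has
// no other users.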

/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
/// match a load whose top elements are either undef or zeros.  The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements.  This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load.  Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}
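
// Illustration for SelectLEAAddr below (editorial sketch, not in the
// original source): the complexity heuristic is meant to accept forms where
// one LEA replaces several ALU ops, e.g.
//   leal 4(%ebx,%ecx,2), %eax   ; vs. shl/add/add
// and to reject a plain "lea (%reg), %dst" that a mov would handle.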

/// SelectLEAAddr - Calls MatchAddress and determines if the maximal addressing
/// mode it matches can be cost-effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or to
  // use a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
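
// Usage illustration for SelectAtomicLoadAdd below (editorial sketch, not in
// the original source): an atomic increment whose result is ignored, e.g.
// __sync_fetch_and_add(&x, 1) with the return value unused, selects to
//   lock incl (%rdi)
// instead of a lock xadd that would tie up a result register.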
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }

  DebugLoc dl = Node->getDebugLoc();
  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }

  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other,
                                                 Ops, 6), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other,
                                                 Ops, 7), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}

enum AtomicOpc {
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

static const unsigned int AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  {
    X86::LOCK_OR8mi,
    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,
    X86::LOCK_OR16mi,
    X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,
    X86::LOCK_OR32mi,
    X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,
    X86::LOCK_OR64mi32,
    X86::LOCK_OR64mr
  },
  {
    X86::LOCK_AND8mi,
    X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8,
    X86::LOCK_AND16mi,
    X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8,
    X86::LOCK_AND32mi,
    X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8,
    X86::LOCK_AND64mi32,
    X86::LOCK_AND64mr
  },
  {
    X86::LOCK_XOR8mi,
    X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8,
    X86::LOCK_XOR16mi,
    X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8,
    X86::LOCK_XOR32mi,
    X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8,
    X86::LOCK_XOR64mi32,
    X86::LOCK_XOR64mr
  }
};

SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  // FIXME: Same as for 'add' and 'sub', try to merge those down here.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Determine which index into the table to use.
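  // AtomicOpcTbl is indexed as AtomicOpcTbl[Op][Size]; for instance, an i32
  // AND with a constant that fits in a sign-extended 8-bit immediate ends up
  // selecting AtomicOpcTbl[AND][SextConstantI32], i.e. X86::LOCK_AND32mi8.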
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
  case ISD::ATOMIC_LOAD_OR:
    Op = OR;
    break;
  case ISD::ATOMIC_LOAD_AND:
    Op = AND;
    break;
  case ISD::ATOMIC_LOAD_XOR:
    Op = XOR;
    break;
  default:
    return 0;
  }

  bool isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
    isCN = true;
    Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
  }

  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isCN)
      Opc = AtomicOpcTbl[Op][ConstantI8];
    else
      Opc = AtomicOpcTbl[Op][I8];
    break;
  case MVT::i16:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI16];
      else
        Opc = AtomicOpcTbl[Op][ConstantI16];
    } else
      Opc = AtomicOpcTbl[Op][I16];
    break;
  case MVT::i32:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI32];
      else
        Opc = AtomicOpcTbl[Op][ConstantI32];
    } else
      Opc = AtomicOpcTbl[Op][I32];
    break;
  case MVT::i64:
    Opc = AtomicOpcTbl[Op][I64];
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI64];
      else if (i64immSExt32(Val.getNode()))
        Opc = AtomicOpcTbl[Op][ConstantI64];
    }
    break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
  SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}

/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
          X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
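      // They read only CF, ZF, and PF, none of which depend on the sign bit,
      // so the compare that feeds them can safely be narrowed.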
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a
    // smaller encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
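    // Illustrative example: (or (shl x, 8), 0x1F00) becomes
    // (shl (or x, 0x1F), 8); the new constant 0x1F fits in a sign-extended
    // 8-bit immediate field, so the OR shrinks to an ri8 form.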
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
    if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
      break;

    unsigned ShlOp, Op = 0;
    EVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op = X86::OR32ri8;  break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8? X86::OR64ri8  : X86::OR64ri32;  break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
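    // For the example above, this yields a sequence like (illustrative)
    //   orl  $31, %eax
    //   shll $8, %eax
    // where the OR now carries a 1-byte immediate instead of a 4-byte one.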
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0), NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
    break;
  }
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r;  break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return NULL;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative, so try folding the other operand too.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);

      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
      InFlag = SDValue(CNode, 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
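    // Under a REX prefix the encodings that would name AH/BH/CH/DH instead
    // denote SPL/BPL/SIL/DIL, so AH cannot be addressed in a REX-prefixed
    // instruction; read AX and shift right by 8 instead.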
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                    CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8: just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0), 0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag), 0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                    CurDAG->getTargetConstant(8, MVT::i8)), 0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return NULL;
  }

  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          TargetRegisterClass *TRC = 0;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, convert "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        TargetRegisterClass *TRC = 0;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }

      // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, convert "testq %rax, $268468232" to
      // "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
      }
    }
    break;
  }
  case ISD::STORE: {
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used.
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node, probably with a new keyword.
    // For example, we currently have this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (implicit EFLAGS)]>;
    // but may need something like this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (transferrable EFLAGS)]>;
    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue Chain = StoreNode->getOperand(0);
    SDValue StoredVal = StoreNode->getOperand(1);
    SDValue Address = StoreNode->getOperand(2);
    SDValue Undef = StoreNode->getOperand(3);

    if (StoreNode->getMemOperand()->getSize() != 8 ||
        Undef->getOpcode() != ISD::UNDEF ||
        Chain->getOpcode() != ISD::LOAD ||
        StoredVal->getOpcode() != X86ISD::DEC ||
        StoredVal.getResNo() != 0 ||
        StoredVal->getOperand(0).getNode() != Chain.getNode())
      break;

    //OPC_CheckPredicate, 1, // Predicate_nontemporalstore
    if (StoreNode->isNonTemporal())
      break;

    LoadSDNode *LoadNode = cast<LoadSDNode>(Chain.getNode());
    if (LoadNode->getOperand(1) != Address ||
        LoadNode->getOperand(2) != Undef)
      break;

    if (!ISD::isNormalLoad(LoadNode))
      break;

    if (!ISD::isNormalStore(StoreNode))
      break;

    // Check that the load's chain result has only one use (the store).
    if (!Chain.hasOneUse())
      break;

    // Merge the input chains if they are not intra-pattern references.
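    // The load's incoming chain becomes the chain operand of the fused node,
    // and users of the store's chain are rewired to the DEC64m result below.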
    SDValue InputChain = LoadNode->getOperand(0);

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    MachineSDNode *Result = CurDAG->getMachineNode(X86::DEC64m,
                                                   Node->getDebugLoc(),
                                                   MVT::i32, MVT::Other, Ops,
                                                   array_lengthof(Ops));
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}