X86ISelDAGToDAG.cpp revision 2865422a4d03f3676545825142f8a262084c5f47
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }
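
    // Illustrative (hypothetical) mapping, not from the original source: for
    // the x86 operand 8(%ebx,%ecx,4), a matched X86ISelAddressMode would hold
    // Base_Reg = %ebx, Scale = 4, IndexReg = %ecx, Disp = 8, with all of the
    // symbolic fields (GV, CP, ES, JT, BlockAddr) unset.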

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }
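
    // Illustrative values (not from the original source): i64immSExt32 holds
    // for 0xFFFFFFFF80000000 (the sign extension of INT32_MIN) but not for
    // 0x0000000080000000, whose bit 31 is set without the upper 32 bits
    // being sign-copied; immSext8 holds for values in -128..127.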
186#include "X86GenDAGISel.inc" 187 188 private: 189 SDNode *Select(SDNode *N); 190 SDNode *SelectGather(SDNode *N, unsigned Opc); 191 SDNode *SelectAtomic64(SDNode *Node, unsigned Opc); 192 SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT); 193 SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT); 194 195 bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM); 196 bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM); 197 bool MatchWrapper(SDValue N, X86ISelAddressMode &AM); 198 bool MatchAddress(SDValue N, X86ISelAddressMode &AM); 199 bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, 200 unsigned Depth); 201 bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM); 202 bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, 203 SDValue &Scale, SDValue &Index, SDValue &Disp, 204 SDValue &Segment); 205 bool SelectLEAAddr(SDValue N, SDValue &Base, 206 SDValue &Scale, SDValue &Index, SDValue &Disp, 207 SDValue &Segment); 208 bool SelectTLSADDRAddr(SDValue N, SDValue &Base, 209 SDValue &Scale, SDValue &Index, SDValue &Disp, 210 SDValue &Segment); 211 bool SelectScalarSSELoad(SDNode *Root, SDValue N, 212 SDValue &Base, SDValue &Scale, 213 SDValue &Index, SDValue &Disp, 214 SDValue &Segment, 215 SDValue &NodeWithChain); 216 217 bool TryFoldLoad(SDNode *P, SDValue N, 218 SDValue &Base, SDValue &Scale, 219 SDValue &Index, SDValue &Disp, 220 SDValue &Segment); 221 222 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for 223 /// inline asm expressions. 224 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, 225 char ConstraintCode, 226 std::vector<SDValue> &OutOps); 227 228 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI); 229 230 inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base, 231 SDValue &Scale, SDValue &Index, 232 SDValue &Disp, SDValue &Segment) { 233 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ? 234 CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) : 235 AM.Base_Reg; 236 Scale = getI8Imm(AM.Scale); 237 Index = AM.IndexReg; 238 // These are 32-bit even in 64-bit mode since RIP relative offset 239 // is 32-bit. 240 if (AM.GV) 241 Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(), 242 MVT::i32, AM.Disp, 243 AM.SymbolFlags); 244 else if (AM.CP) 245 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, 246 AM.Align, AM.Disp, AM.SymbolFlags); 247 else if (AM.ES) 248 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags); 249 else if (AM.JT != -1) 250 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags); 251 else if (AM.BlockAddr) 252 Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32, 253 true, AM.SymbolFlags); 254 else 255 Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32); 256 257 if (AM.Segment.getNode()) 258 Segment = AM.Segment; 259 else 260 Segment = CurDAG->getRegister(0, MVT::i32); 261 } 262 263 /// getI8Imm - Return a target constant with the specified value, of type 264 /// i8. 265 inline SDValue getI8Imm(unsigned Imm) { 266 return CurDAG->getTargetConstant(Imm, MVT::i8); 267 } 268 269 /// getI32Imm - Return a target constant with the specified value, of type 270 /// i32. 271 inline SDValue getI32Imm(unsigned Imm) { 272 return CurDAG->getTargetConstant(Imm, MVT::i32); 273 } 274 275 /// getGlobalBaseReg - Return an SDNode that returns the value of 276 /// the global base register. Output instructions required to 277 /// initialize the global base register, if necessary. 

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         N->getOpcode() == X86ISD::TC_RETURN)) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to a store
    // and a load through the stack. This is a gross hack. We would like to
    // simply mark these as being illegal, but when we do that, legalize
    // produces these when it expands calls, then expands these in the same
    // legalize pass. We would like dag combine to be able to hack on these
    // between the call expansion and the node legalization. As such this
    // pass basically does "really late" legalization of these inline with
    // the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
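
// Illustrative (hypothetical) walk-through of the pass above, not from the
// original source: for an fp_round f64 -> f32 whose source lives on the x87
// stack but whose result is needed in an SSE register, the loop picks
// MemVT = f32 (FP_ROUND must use DstVT), emits a truncating f32 store to a
// fresh stack temporary, reloads it as an extending load, and replaces all
// uses of the original fp_round with that load.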

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking
  // direct folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has a
  // smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This allows us
// to convert the shift and mask into an h-register extract and a scaled
// index. Returns false if the simplification is performed.
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
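
// Illustrative instance (not from the original source): with C1 = 2 the
// function above rewrites "(x >> 6) & (0xff << 2)" as
// "((x >> 8) & 0xff) << 2", so the AND becomes the index register and the
// <<2 is absorbed into the addressing mode as Scale = 4.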

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = CountLeadingZeros_64(Mask);
  unsigned MaskTZ = CountTrailingZeros_64(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  DebugLoc DL = N.getDebugLoc();
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
      break;
    }

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }
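
  // Illustrative result (not from the original source): the multiply cases
  // below turn x*5 into Base = x, Index = x, Scale = 4, so the multiply can
  // be selected as "leal (%eax,%eax,4), %eax" instead of an imull.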
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address (leaving the
    // index field unused), use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}
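
// Illustrative trace (not from the original source): for a pointer computed
// as "base + (idx << 2) + 16", MatchAddressRecursively walks the ADD nodes,
// folds the constant via FoldOffsetIntoAddress (Disp = 16), matches the
// shift through the ISD::SHL case (Scale = 4, IndexReg = idx), and leaves
// "base" for MatchAddressBase to place in Base_Reg.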

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern-match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}


/// SelectLEAAddr - Calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost-effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // to use a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
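
// Illustration (not from the original source): when the fetched result of
// __sync_fetch_and_add(p, 1) is unused, SelectAtomicLoadAdd below picks
// X86::LOCK_INC32m, i.e. "lock incl (%rdi)"; an unused
// __sync_fetch_and_add(p, -1) becomes "lock decl (%rdi)" via LOCK_DEC32m.
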
// FIXME: Figure out some way to unify this with the 'or' and other code
// below.
SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_add_and_fetch and
  // __sync_sub_and_fetch where the result is not used. This allows us
  // to use "lock" version of add, sub, inc, dec instructions.
  // FIXME: Do not use special instructions but instead add the "lock"
  // prefix to the target node somehow. The extra information will then be
  // transferred to machine instruction and it denotes the prefix.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }

  DebugLoc dl = Node->getDebugLoc();
  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }

  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7),
                          0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}

enum AtomicOpc {
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  {
    X86::LOCK_OR8mi,
    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,
    X86::LOCK_OR16mi,
    X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,
    X86::LOCK_OR32mi,
    X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,
    X86::LOCK_OR64mi32,
    X86::LOCK_OR64mr
  },
  {
    X86::LOCK_AND8mi,
    X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8,
    X86::LOCK_AND16mi,
    X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8,
    X86::LOCK_AND32mi,
    X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8,
    X86::LOCK_AND64mi32,
    X86::LOCK_AND64mr
  },
  {
    X86::LOCK_XOR8mi,
    X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8,
    X86::LOCK_XOR16mi,
    X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8,
    X86::LOCK_XOR32mi,
    X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8,
    X86::LOCK_XOR64mi32,
    X86::LOCK_XOR64mr
  }
};

SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  // FIXME: Same as for 'add' and 'sub', try to merge those down here.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Which index into the table.
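  // (For orientation: the row of AtomicOpcTbl is the operation, the column
  // the operand form. E.g. a 32-bit ATOMIC_LOAD_OR whose constant fits in a
  // sign-extended imm8 selects AtomicOpcTbl[OR][SextConstantI32], i.e.
  // X86::LOCK_OR32mi8.)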
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
  case ISD::ATOMIC_LOAD_OR:
    Op = OR;
    break;
  case ISD::ATOMIC_LOAD_AND:
    Op = AND;
    break;
  case ISD::ATOMIC_LOAD_XOR:
    Op = XOR;
    break;
  default:
    return 0;
  }

  bool isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
    isCN = true;
    Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
  }

  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isCN)
      Opc = AtomicOpcTbl[Op][ConstantI8];
    else
      Opc = AtomicOpcTbl[Op][I8];
    break;
  case MVT::i16:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI16];
      else
        Opc = AtomicOpcTbl[Op][ConstantI16];
    } else
      Opc = AtomicOpcTbl[Op][I16];
    break;
  case MVT::i32:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI32];
      else
        Opc = AtomicOpcTbl[Op][ConstantI32];
    } else
      Opc = AtomicOpcTbl[Op][I32];
    break;
  case MVT::i64:
    Opc = AtomicOpcTbl[Op][I64];
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI64];
      else if (i64immSExt32(Val.getNode()))
        Opc = AtomicOpcTbl[Op][ConstantI64];
    }
    break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
  SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}

/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
          X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
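      // (These conditions and their branch/CMOV equivalents read only CF,
      // ZF, and PF, never SF or OF, so those bits need not be accurate.)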
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

/// isLoadIncOrDecStore - Check whether the chain ending in StoreNode is
/// suitable for the {load; increment or decrement; store} read-modify-write
/// transformation.
static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
                                SDValue StoredVal, SelectionDAG *CurDAG,
                                LoadSDNode* &LoadNode, SDValue &InputChain) {

  // Is the value stored the result of a DEC or INC?
  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;

  // Is the stored value result 0 of the load?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);
  // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
  EVT LdVT = LoadNode->getMemoryVT();
  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
      LdVT != MVT::i8)
    return false;

  // Is the store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand. Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();

  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    SmallVector<SDValue, 4> ChainOps;
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        continue;
      }

      // Make sure using Op as part of the chain would not cause a cycle here.
      // In theory, we could check whether the chain node is a predecessor of
      // the load. But that can be very expensive. Instead visit the uses and
      // make sure they all have smaller node id than the load.
      int LoadId = LoadNode->getNodeId();
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        if (UI.getUse().getResNo() != 0)
          continue;
        if (UI->getNodeId() > LoadId)
          return false;
      }

      ChainOps.push_back(Op);
    }

    if (ChainCheck)
      // Make a new TokenFactor with all the other input chains except
      // for the load.
      InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
                                   MVT::Other, &ChainOps[0], ChainOps.size());
  }
  if (!ChainCheck)
    return false;

  return true;
}

/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
  if (Opc == X86ISD::DEC) {
    if (LdVT == MVT::i64) return X86::DEC64m;
    if (LdVT == MVT::i32) return X86::DEC32m;
    if (LdVT == MVT::i16) return X86::DEC16m;
    if (LdVT == MVT::i8)  return X86::DEC8m;
  } else {
    assert(Opc == X86ISD::INC && "unrecognized opcode");
    if (LdVT == MVT::i64) return X86::INC64m;
    if (LdVT == MVT::i32) return X86::INC32m;
    if (LdVT == MVT::i16) return X86::INC16m;
    if (LdVT == MVT::i8)  return X86::INC8m;
  }
  llvm_unreachable("unrecognized size for LdVT");
}

/// SelectGather - Customized ISel for GATHER operations.
///
SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
  // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
  SDValue Chain = Node->getOperand(0);
  SDValue VSrc = Node->getOperand(2);
  SDValue Base = Node->getOperand(3);
  SDValue VIdx = Node->getOperand(4);
  SDValue VMask = Node->getOperand(5);
  ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
  if (!Scale)
    return 0;

  SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
                                   MVT::Other);

  // Memory Operands: Base, Scale, Index, Disp, Segment
  SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
  SDValue Segment = CurDAG->getRegister(0, MVT::i32);
  const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
                          Disp, Segment, VMask, Chain };
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           VTs, Ops, array_lengthof(Ops));
  // Node has 2 outputs: VDst and MVT::Other.
  // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
  // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
  // of ResNode.
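  //   (Node, 0) -> (ResNode, 0)   // gathered vector
  //   (Node, 1) -> (ResNode, 2)   // chain; (ResNode, 1), the mask
  //                               // writeback, is left unused here.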
  ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
  ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
  return ResNode;
}

SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_avx2_gather_d_pd:
    case Intrinsic::x86_avx2_gather_d_pd_256:
    case Intrinsic::x86_avx2_gather_q_pd:
    case Intrinsic::x86_avx2_gather_q_pd_256:
    case Intrinsic::x86_avx2_gather_d_ps:
    case Intrinsic::x86_avx2_gather_d_ps_256:
    case Intrinsic::x86_avx2_gather_q_ps:
    case Intrinsic::x86_avx2_gather_q_ps_256:
    case Intrinsic::x86_avx2_gather_d_q:
    case Intrinsic::x86_avx2_gather_d_q_256:
    case Intrinsic::x86_avx2_gather_q_q:
    case Intrinsic::x86_avx2_gather_q_q_256:
    case Intrinsic::x86_avx2_gather_d_d:
    case Intrinsic::x86_avx2_gather_d_d_256:
    case Intrinsic::x86_avx2_gather_q_d:
    case Intrinsic::x86_avx2_gather_q_d_256: {
      unsigned Opc;
      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_avx2_gather_d_pd:     Opc = X86::VGATHERDPDrm;  break;
      case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
      case Intrinsic::x86_avx2_gather_q_pd:     Opc = X86::VGATHERQPDrm;  break;
      case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
      case Intrinsic::x86_avx2_gather_d_ps:     Opc = X86::VGATHERDPSrm;  break;
      case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
      case Intrinsic::x86_avx2_gather_q_ps:     Opc = X86::VGATHERQPSrm;  break;
      case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
      case Intrinsic::x86_avx2_gather_d_q:      Opc = X86::VPGATHERDQrm;  break;
      case Intrinsic::x86_avx2_gather_d_q_256:  Opc = X86::VPGATHERDQYrm; break;
      case Intrinsic::x86_avx2_gather_q_q:      Opc = X86::VPGATHERQQrm;  break;
      case Intrinsic::x86_avx2_gather_q_q_256:  Opc = X86::VPGATHERQQYrm; break;
      case Intrinsic::x86_avx2_gather_d_d:      Opc = X86::VPGATHERDDrm;  break;
      case Intrinsic::x86_avx2_gather_d_d_256:  Opc = X86::VPGATHERDDYrm; break;
      case Intrinsic::x86_avx2_gather_q_d:      Opc = X86::VPGATHERQDrm;  break;
      case Intrinsic::x86_avx2_gather_q_d_256:  Opc = X86::VPGATHERQDYrm; break;
      }
      SDNode *RetVal = SelectGather(Node, Opc);
      if (RetVal)
        // We already called ReplaceUses inside SelectGather.
        return NULL;
      break;
    }
    }
    break;
  }
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
  case X86ISD::ATOMXOR64_DAG:
  case X86ISD::ATOMADD64_DAG:
  case X86ISD::ATOMSUB64_DAG:
  case X86ISD::ATOMNAND64_DAG:
  case X86ISD::ATOMAND64_DAG:
  case X86ISD::ATOMSWAP64_DAG: {
    unsigned Opc;
    switch (Opcode) {
    default: llvm_unreachable("Impossible opcode");
    case X86ISD::ATOMOR64_DAG:   Opc = X86::ATOMOR6432;   break;
    case X86ISD::ATOMXOR64_DAG:  Opc = X86::ATOMXOR6432;  break;
    case X86ISD::ATOMADD64_DAG:  Opc = X86::ATOMADD6432;  break;
    case X86ISD::ATOMSUB64_DAG:  Opc = X86::ATOMSUB6432;  break;
    case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
    case X86ISD::ATOMAND64_DAG:  Opc = X86::ATOMAND6432;  break;
    case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
    }
    SDNode *RetVal = SelectAtomic64(Node, Opc);
    if (RetVal)
      return RetVal;
    break;
  }

  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a
    // smaller encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
    if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
      break;

    unsigned ShlOp, Op;
    EVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op = X86::OR32ri8;  break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = CstVT == MVT::i8 ? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT == MVT::i8 ? X86::OR64ri8  : X86::OR64ri32;  break;
      case ISD::XOR: Op = CstVT == MVT::i8 ? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),
                                         NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
  }
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r;  break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = { N1, InFlag };
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return NULL;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);

      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
      InFlag = SDValue(CNode, 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                         CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

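    // (Sketch of the selected sequence, e.g. for signed i32:
    //    EAX <- N0; CDQ            ; sign-extend EAX into EDX
    //    IDIV r/m32                ; quotient -> EAX, remainder -> EDX.)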
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),
                  0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),
                  0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                         CurDAG->getTargetConstant(8, MVT::i8)), 0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return NULL;
  }

  case X86ISD::CMP:
  case X86ISD::SUB: {
    // Sometimes a SUB is used to perform comparison.
    if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
      // This node is not a CMP.
      break;
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          const TargetRegisterClass *TRC;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC),
                        0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, convert "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        const TargetRegisterClass *TRC;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }

      // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg,
                                      Imm);
      }

      // For example, convert "testq %rax, $268468232" to
      // "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg,
                                      Imm);
      }
    }
    break;
  }
  case ISD::STORE: {
    // Change a chain of {load; incr or dec; store} of the same value into
    // a simple increment or decrement through memory of that value, if the
    // uses of the modified value and its address are suitable.
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used. (This also applies to
    // {INC,DEC}X{64,32,16,8}.)
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node,
    // probably with a new keyword. For example, we currently have this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                 (implicit EFLAGS)]>;
    // but maybe need something like this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                 (transferrable EFLAGS)]>;

    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue StoredVal = StoreNode->getOperand(1);
    unsigned Opc = StoredVal->getOpcode();

    LoadSDNode *LoadNode = 0;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    EVT LdVT = LoadNode->getMemoryVT();
    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
                                                   Node->getDebugLoc(),
                                                   MVT::i32, MVT::Other, Ops,
                                                   array_lengthof(Ops));
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }

  // FIXME: Custom handling because TableGen doesn't support multiple implicit
  // defs in an instruction pattern.
  case X86ISD::PCMPESTRI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    SDValue N3 = Node->getOperand(3);
    SDValue N4 = Node->getOperand(4);

    // Make sure the last argument is a constant.
    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N4);
    if (!Cst)
      break;

    uint64_t Imm = Cst->getZExtValue();

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                                          X86::EAX, N1, SDValue()).getValue(1);
    InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
                                  N3, InFlag).getValue(1);

    SDValue Ops[] = { N0, N2, getI8Imm(Imm), InFlag };
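    // (PCMPESTRI takes the explicit string lengths in EAX and EDX, copied in
    // above, and implicitly defines ECX and EFLAGS; those results are copied
    // out below.)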
    unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
    InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
                                            array_lengthof(Ops)), 0);

    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::ECX, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
    }
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::EFLAGS, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
    }

    return NULL;
  }

  // FIXME: Custom handling because TableGen doesn't support multiple implicit
  // defs in an instruction pattern.
  case X86ISD::PCMPISTRI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);

    // Make sure the last argument is a constant.
    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N2);
    if (!Cst)
      break;

    uint64_t Imm = Cst->getZExtValue();

    SDValue Ops[] = { N0, N1, getI8Imm(Imm) };
    unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
    SDValue InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
                                                    array_lengthof(Ops)), 0);

    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::ECX, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
    }
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::EFLAGS, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
    }

    return NULL;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}