X86ISelDAGToDAG.cpp revision 2b87e06d265e83d61873075e8f8e9c51430ff332
1//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines a DAG pattern matching instruction selector for X86, 11// converting from a legalized dag to a X86 dag. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "x86-isel" 16#include "X86.h" 17#include "X86InstrBuilder.h" 18#include "X86MachineFunctionInfo.h" 19#include "X86RegisterInfo.h" 20#include "X86Subtarget.h" 21#include "X86TargetMachine.h" 22#include "llvm/Instructions.h" 23#include "llvm/Intrinsics.h" 24#include "llvm/Type.h" 25#include "llvm/CodeGen/FunctionLoweringInfo.h" 26#include "llvm/CodeGen/MachineConstantPool.h" 27#include "llvm/CodeGen/MachineFunction.h" 28#include "llvm/CodeGen/MachineFrameInfo.h" 29#include "llvm/CodeGen/MachineInstrBuilder.h" 30#include "llvm/CodeGen/MachineRegisterInfo.h" 31#include "llvm/CodeGen/SelectionDAGISel.h" 32#include "llvm/Target/TargetMachine.h" 33#include "llvm/Target/TargetOptions.h" 34#include "llvm/Support/CFG.h" 35#include "llvm/Support/Debug.h" 36#include "llvm/Support/ErrorHandling.h" 37#include "llvm/Support/MathExtras.h" 38#include "llvm/Support/raw_ostream.h" 39#include "llvm/ADT/Statistic.h" 40using namespace llvm; 41 42STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor"); 43 44//===----------------------------------------------------------------------===// 45// Pattern Matcher Implementation 46//===----------------------------------------------------------------------===// 47 48namespace { 49 /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses 50 /// SDValue's instead of register numbers for the leaves of the matched 51 /// tree. 52 struct X86ISelAddressMode { 53 enum { 54 RegBase, 55 FrameIndexBase 56 } BaseType; 57 58 // This is really a union, discriminated by BaseType! 59 SDValue Base_Reg; 60 int Base_FrameIndex; 61 62 unsigned Scale; 63 SDValue IndexReg; 64 int32_t Disp; 65 SDValue Segment; 66 const GlobalValue *GV; 67 const Constant *CP; 68 const BlockAddress *BlockAddr; 69 const char *ES; 70 int JT; 71 unsigned Align; // CP alignment. 72 unsigned char SymbolFlags; // X86II::MO_* 73 74 X86ISelAddressMode() 75 : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0), 76 Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0), 77 SymbolFlags(X86II::MO_NO_FLAG) { 78 } 79 80 bool hasSymbolicDisplacement() const { 81 return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0; 82 } 83 84 bool hasBaseOrIndexReg() const { 85 return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0; 86 } 87 88 /// isRIPRelative - Return true if this addressing mode is already RIP 89 /// relative. 90 bool isRIPRelative() const { 91 if (BaseType != RegBase) return false; 92 if (RegisterSDNode *RegNode = 93 dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode())) 94 return RegNode->getReg() == X86::RIP; 95 return false; 96 } 97 98 void setBaseReg(SDValue Reg) { 99 BaseType = RegBase; 100 Base_Reg = Reg; 101 } 102 103#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 104 void dump() { 105 dbgs() << "X86ISelAddressMode " << this << '\n'; 106 dbgs() << "Base_Reg "; 107 if (Base_Reg.getNode() != 0) 108 Base_Reg.getNode()->dump(); 109 else 110 dbgs() << "nul"; 111 dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n' 112 << " Scale" << Scale << '\n' 113 << "IndexReg "; 114 if (IndexReg.getNode() != 0) 115 IndexReg.getNode()->dump(); 116 else 117 dbgs() << "nul"; 118 dbgs() << " Disp " << Disp << '\n' 119 << "GV "; 120 if (GV) 121 GV->dump(); 122 else 123 dbgs() << "nul"; 124 dbgs() << " CP "; 125 if (CP) 126 CP->dump(); 127 else 128 dbgs() << "nul"; 129 dbgs() << '\n' 130 << "ES "; 131 if (ES) 132 dbgs() << ES; 133 else 134 dbgs() << "nul"; 135 dbgs() << " JT" << JT << " Align" << Align << '\n'; 136 } 137#endif 138 }; 139} 140 141namespace { 142 //===--------------------------------------------------------------------===// 143 /// ISel - X86 specific code to select X86 machine instructions for 144 /// SelectionDAG operations. 145 /// 146 class X86DAGToDAGISel : public SelectionDAGISel { 147 /// X86Lowering - This object fully describes how to lower LLVM code to an 148 /// X86-specific SelectionDAG. 149 const X86TargetLowering &X86Lowering; 150 151 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can 152 /// make the right decision when generating code for different targets. 153 const X86Subtarget *Subtarget; 154 155 /// OptForSize - If true, selector should try to optimize for code size 156 /// instead of performance. 157 bool OptForSize; 158 159 public: 160 explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel) 161 : SelectionDAGISel(tm, OptLevel), 162 X86Lowering(*tm.getTargetLowering()), 163 Subtarget(&tm.getSubtarget<X86Subtarget>()), 164 OptForSize(false) {} 165 166 virtual const char *getPassName() const { 167 return "X86 DAG->DAG Instruction Selection"; 168 } 169 170 virtual void EmitFunctionEntryCode(); 171 172 virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const; 173 174 virtual void PreprocessISelDAG(); 175 176 inline bool immSext8(SDNode *N) const { 177 return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue()); 178 } 179 180 // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit 181 // sign extended field. 182 inline bool i64immSExt32(SDNode *N) const { 183 uint64_t v = cast<ConstantSDNode>(N)->getZExtValue(); 184 return (int64_t)v == (int32_t)v; 185 } 186 187// Include the pieces autogenerated from the target description. 188#include "X86GenDAGISel.inc" 189 190 private: 191 SDNode *Select(SDNode *N); 192 SDNode *SelectGather(SDNode *N, unsigned Opc); 193 SDNode *SelectAtomic64(SDNode *Node, unsigned Opc); 194 SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT); 195 196 bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM); 197 bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM); 198 bool MatchWrapper(SDValue N, X86ISelAddressMode &AM); 199 bool MatchAddress(SDValue N, X86ISelAddressMode &AM); 200 bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, 201 unsigned Depth); 202 bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM); 203 bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, 204 SDValue &Scale, SDValue &Index, SDValue &Disp, 205 SDValue &Segment); 206 bool SelectLEAAddr(SDValue N, SDValue &Base, 207 SDValue &Scale, SDValue &Index, SDValue &Disp, 208 SDValue &Segment); 209 bool SelectTLSADDRAddr(SDValue N, SDValue &Base, 210 SDValue &Scale, SDValue &Index, SDValue &Disp, 211 SDValue &Segment); 212 bool SelectScalarSSELoad(SDNode *Root, SDValue N, 213 SDValue &Base, SDValue &Scale, 214 SDValue &Index, SDValue &Disp, 215 SDValue &Segment, 216 SDValue &NodeWithChain); 217 218 bool TryFoldLoad(SDNode *P, SDValue N, 219 SDValue &Base, SDValue &Scale, 220 SDValue &Index, SDValue &Disp, 221 SDValue &Segment); 222 223 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for 224 /// inline asm expressions. 225 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, 226 char ConstraintCode, 227 std::vector<SDValue> &OutOps); 228 229 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI); 230 231 inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base, 232 SDValue &Scale, SDValue &Index, 233 SDValue &Disp, SDValue &Segment) { 234 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ? 235 CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) : 236 AM.Base_Reg; 237 Scale = getI8Imm(AM.Scale); 238 Index = AM.IndexReg; 239 // These are 32-bit even in 64-bit mode since RIP relative offset 240 // is 32-bit. 241 if (AM.GV) 242 Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(), 243 MVT::i32, AM.Disp, 244 AM.SymbolFlags); 245 else if (AM.CP) 246 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, 247 AM.Align, AM.Disp, AM.SymbolFlags); 248 else if (AM.ES) { 249 assert(!AM.Disp && "Non-zero displacement is ignored with ES."); 250 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags); 251 } else if (AM.JT != -1) { 252 assert(!AM.Disp && "Non-zero displacement is ignored with JT."); 253 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags); 254 } else if (AM.BlockAddr) 255 Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp, 256 AM.SymbolFlags); 257 else 258 Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32); 259 260 if (AM.Segment.getNode()) 261 Segment = AM.Segment; 262 else 263 Segment = CurDAG->getRegister(0, MVT::i32); 264 } 265 266 /// getI8Imm - Return a target constant with the specified value, of type 267 /// i8. 268 inline SDValue getI8Imm(unsigned Imm) { 269 return CurDAG->getTargetConstant(Imm, MVT::i8); 270 } 271 272 /// getI32Imm - Return a target constant with the specified value, of type 273 /// i32. 274 inline SDValue getI32Imm(unsigned Imm) { 275 return CurDAG->getTargetConstant(Imm, MVT::i32); 276 } 277 278 /// getGlobalBaseReg - Return an SDNode that returns the value of 279 /// the global base register. Output instructions required to 280 /// initialize the global base register, if necessary. 281 /// 282 SDNode *getGlobalBaseReg(); 283 284 /// getTargetMachine - Return a reference to the TargetMachine, casted 285 /// to the target-specific type. 286 const X86TargetMachine &getTargetMachine() { 287 return static_cast<const X86TargetMachine &>(TM); 288 } 289 290 /// getInstrInfo - Return a reference to the TargetInstrInfo, casted 291 /// to the target-specific type. 292 const X86InstrInfo *getInstrInfo() { 293 return getTargetMachine().getInstrInfo(); 294 } 295 }; 296} 297 298 299bool 300X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const { 301 if (OptLevel == CodeGenOpt::None) return false; 302 303 if (!N.hasOneUse()) 304 return false; 305 306 if (N.getOpcode() != ISD::LOAD) 307 return true; 308 309 // If N is a load, do additional profitability checks. 310 if (U == Root) { 311 switch (U->getOpcode()) { 312 default: break; 313 case X86ISD::ADD: 314 case X86ISD::SUB: 315 case X86ISD::AND: 316 case X86ISD::XOR: 317 case X86ISD::OR: 318 case ISD::ADD: 319 case ISD::ADDC: 320 case ISD::ADDE: 321 case ISD::AND: 322 case ISD::OR: 323 case ISD::XOR: { 324 SDValue Op1 = U->getOperand(1); 325 326 // If the other operand is a 8-bit immediate we should fold the immediate 327 // instead. This reduces code size. 328 // e.g. 329 // movl 4(%esp), %eax 330 // addl $4, %eax 331 // vs. 332 // movl $4, %eax 333 // addl 4(%esp), %eax 334 // The former is 2 bytes shorter. In case where the increment is 1, then 335 // the saving can be 4 bytes (by using incl %eax). 336 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) 337 if (Imm->getAPIntValue().isSignedIntN(8)) 338 return false; 339 340 // If the other operand is a TLS address, we should fold it instead. 341 // This produces 342 // movl %gs:0, %eax 343 // leal i@NTPOFF(%eax), %eax 344 // instead of 345 // movl $i@NTPOFF, %eax 346 // addl %gs:0, %eax 347 // if the block also has an access to a second TLS address this will save 348 // a load. 349 // FIXME: This is probably also true for non TLS addresses. 350 if (Op1.getOpcode() == X86ISD::Wrapper) { 351 SDValue Val = Op1.getOperand(0); 352 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress) 353 return false; 354 } 355 } 356 } 357 } 358 359 return true; 360} 361 362/// MoveBelowCallOrigChain - Replace the original chain operand of the call with 363/// load's chain operand and move load below the call's chain operand. 364static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, 365 SDValue Call, SDValue OrigChain) { 366 SmallVector<SDValue, 8> Ops; 367 SDValue Chain = OrigChain.getOperand(0); 368 if (Chain.getNode() == Load.getNode()) 369 Ops.push_back(Load.getOperand(0)); 370 else { 371 assert(Chain.getOpcode() == ISD::TokenFactor && 372 "Unexpected chain operand"); 373 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) 374 if (Chain.getOperand(i).getNode() == Load.getNode()) 375 Ops.push_back(Load.getOperand(0)); 376 else 377 Ops.push_back(Chain.getOperand(i)); 378 SDValue NewChain = 379 CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(), 380 MVT::Other, &Ops[0], Ops.size()); 381 Ops.clear(); 382 Ops.push_back(NewChain); 383 } 384 for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i) 385 Ops.push_back(OrigChain.getOperand(i)); 386 CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size()); 387 CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0), 388 Load.getOperand(1), Load.getOperand(2)); 389 390 bool IsGlued = Call.getOperand(0).getNode()->getGluedUser() == Call.getNode(); 391 unsigned NumOps = Call.getNode()->getNumOperands(); 392 Ops.clear(); 393 Ops.push_back(SDValue(Load.getNode(), 1)); 394 for (unsigned i = 1, e = NumOps; i != e; ++i) 395 Ops.push_back(Call.getOperand(i)); 396 if (!IsGlued) 397 CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps); 398 else 399 // If call's chain was glued to the call (tailcall), and now the load 400 // is moved between them. Remove the glue to avoid a cycle (where the 401 // call is glued to its old chain and the load is using the old chain 402 // as its new chain). 403 CurDAG->MorphNodeTo(Call.getNode(), Call.getOpcode(), 404 Call.getNode()->getVTList(), &Ops[0], NumOps-1); 405} 406 407/// isCalleeLoad - Return true if call address is a load and it can be 408/// moved below CALLSEQ_START and the chains leading up to the call. 409/// Return the CALLSEQ_START by reference as a second output. 410/// In the case of a tail call, there isn't a callseq node between the call 411/// chain and the load. 412static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) { 413 if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse()) 414 return false; 415 LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode()); 416 if (!LD || 417 LD->isVolatile() || 418 LD->getAddressingMode() != ISD::UNINDEXED || 419 LD->getExtensionType() != ISD::NON_EXTLOAD) 420 return false; 421 422 // Now let's find the callseq_start. 423 while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) { 424 if (!Chain.hasOneUse()) 425 return false; 426 Chain = Chain.getOperand(0); 427 } 428 429 if (!Chain.getNumOperands()) 430 return false; 431 if (Chain.getOperand(0).getNode() == Callee.getNode()) 432 return true; 433 if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor && 434 Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) && 435 Callee.getValue(1).hasOneUse()) 436 return true; 437 return false; 438} 439 440void X86DAGToDAGISel::PreprocessISelDAG() { 441 // OptForSize is used in pattern predicates that isel is matching. 442 OptForSize = MF->getFunction()->getFnAttributes().hasOptimizeForSizeAttr(); 443 444 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(), 445 E = CurDAG->allnodes_end(); I != E; ) { 446 SDNode *N = I++; // Preincrement iterator to avoid invalidation issues. 447 448 if (OptLevel != CodeGenOpt::None && 449 (N->getOpcode() == X86ISD::CALL || 450 N->getOpcode() == X86ISD::TC_RETURN)) { 451 /// Also try moving call address load from outside callseq_start to just 452 /// before the call to allow it to be folded. 453 /// 454 /// [Load chain] 455 /// ^ 456 /// | 457 /// [Load] 458 /// ^ ^ 459 /// | | 460 /// / \-- 461 /// / | 462 ///[CALLSEQ_START] | 463 /// ^ | 464 /// | | 465 /// [LOAD/C2Reg] | 466 /// | | 467 /// \ / 468 /// \ / 469 /// [CALL] 470 bool HasCallSeq = N->getOpcode() == X86ISD::CALL; 471 SDValue Chain = N->getOperand(0); 472 SDValue Load = N->getOperand(1); 473 if (!isCalleeLoad(Load, Chain, HasCallSeq)) 474 continue; 475 MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain); 476 ++NumLoadMoved; 477 continue; 478 } 479 480 // Lower fpround and fpextend nodes that target the FP stack to be store and 481 // load to the stack. This is a gross hack. We would like to simply mark 482 // these as being illegal, but when we do that, legalize produces these when 483 // it expands calls, then expands these in the same legalize pass. We would 484 // like dag combine to be able to hack on these between the call expansion 485 // and the node legalization. As such this pass basically does "really 486 // late" legalization of these inline with the X86 isel pass. 487 // FIXME: This should only happen when not compiled with -O0. 488 if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND) 489 continue; 490 491 EVT SrcVT = N->getOperand(0).getValueType(); 492 EVT DstVT = N->getValueType(0); 493 494 // If any of the sources are vectors, no fp stack involved. 495 if (SrcVT.isVector() || DstVT.isVector()) 496 continue; 497 498 // If the source and destination are SSE registers, then this is a legal 499 // conversion that should not be lowered. 500 bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT); 501 bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT); 502 if (SrcIsSSE && DstIsSSE) 503 continue; 504 505 if (!SrcIsSSE && !DstIsSSE) { 506 // If this is an FPStack extension, it is a noop. 507 if (N->getOpcode() == ISD::FP_EXTEND) 508 continue; 509 // If this is a value-preserving FPStack truncation, it is a noop. 510 if (N->getConstantOperandVal(1)) 511 continue; 512 } 513 514 // Here we could have an FP stack truncation or an FPStack <-> SSE convert. 515 // FPStack has extload and truncstore. SSE can fold direct loads into other 516 // operations. Based on this, decide what we want to do. 517 EVT MemVT; 518 if (N->getOpcode() == ISD::FP_ROUND) 519 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'. 520 else 521 MemVT = SrcIsSSE ? SrcVT : DstVT; 522 523 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT); 524 DebugLoc dl = N->getDebugLoc(); 525 526 // FIXME: optimize the case where the src/dest is a load or store? 527 SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, 528 N->getOperand(0), 529 MemTmp, MachinePointerInfo(), MemVT, 530 false, false, 0); 531 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp, 532 MachinePointerInfo(), 533 MemVT, false, false, 0); 534 535 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the 536 // extload we created. This will cause general havok on the dag because 537 // anything below the conversion could be folded into other existing nodes. 538 // To avoid invalidating 'I', back it up to the convert node. 539 --I; 540 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result); 541 542 // Now that we did that, the node is dead. Increment the iterator to the 543 // next node to process, then delete N. 544 ++I; 545 CurDAG->DeleteNode(N); 546 } 547} 548 549 550/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in 551/// the main function. 552void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB, 553 MachineFrameInfo *MFI) { 554 const TargetInstrInfo *TII = TM.getInstrInfo(); 555 if (Subtarget->isTargetCygMing()) { 556 unsigned CallOp = 557 Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32; 558 BuildMI(BB, DebugLoc(), 559 TII->get(CallOp)).addExternalSymbol("__main"); 560 } 561} 562 563void X86DAGToDAGISel::EmitFunctionEntryCode() { 564 // If this is main, emit special code for main. 565 if (const Function *Fn = MF->getFunction()) 566 if (Fn->hasExternalLinkage() && Fn->getName() == "main") 567 EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo()); 568} 569 570static bool isDispSafeForFrameIndex(int64_t Val) { 571 // On 64-bit platforms, we can run into an issue where a frame index 572 // includes a displacement that, when added to the explicit displacement, 573 // will overflow the displacement field. Assuming that the frame index 574 // displacement fits into a 31-bit integer (which is only slightly more 575 // aggressive than the current fundamental assumption that it fits into 576 // a 32-bit integer), a 31-bit disp should always be safe. 577 return isInt<31>(Val); 578} 579 580bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset, 581 X86ISelAddressMode &AM) { 582 int64_t Val = AM.Disp + Offset; 583 CodeModel::Model M = TM.getCodeModel(); 584 if (Subtarget->is64Bit()) { 585 if (!X86::isOffsetSuitableForCodeModel(Val, M, 586 AM.hasSymbolicDisplacement())) 587 return true; 588 // In addition to the checks required for a register base, check that 589 // we do not try to use an unsafe Disp with a frame index. 590 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase && 591 !isDispSafeForFrameIndex(Val)) 592 return true; 593 } 594 AM.Disp = Val; 595 return false; 596 597} 598 599bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){ 600 SDValue Address = N->getOperand(1); 601 602 // load gs:0 -> GS segment register. 603 // load fs:0 -> FS segment register. 604 // 605 // This optimization is valid because the GNU TLS model defines that 606 // gs:0 (or fs:0 on X86-64) contains its own address. 607 // For more information see http://people.redhat.com/drepper/tls.pdf 608 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address)) 609 if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 && 610 Subtarget->isTargetLinux()) 611 switch (N->getPointerInfo().getAddrSpace()) { 612 case 256: 613 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16); 614 return false; 615 case 257: 616 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16); 617 return false; 618 } 619 620 return true; 621} 622 623/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes 624/// into an addressing mode. These wrap things that will resolve down into a 625/// symbol reference. If no match is possible, this returns true, otherwise it 626/// returns false. 627bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) { 628 // If the addressing mode already has a symbol as the displacement, we can 629 // never match another symbol. 630 if (AM.hasSymbolicDisplacement()) 631 return true; 632 633 SDValue N0 = N.getOperand(0); 634 CodeModel::Model M = TM.getCodeModel(); 635 636 // Handle X86-64 rip-relative addresses. We check this before checking direct 637 // folding because RIP is preferable to non-RIP accesses. 638 if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP && 639 // Under X86-64 non-small code model, GV (and friends) are 64-bits, so 640 // they cannot be folded into immediate fields. 641 // FIXME: This can be improved for kernel and other models? 642 (M == CodeModel::Small || M == CodeModel::Kernel)) { 643 // Base and index reg must be 0 in order to use %rip as base. 644 if (AM.hasBaseOrIndexReg()) 645 return true; 646 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) { 647 X86ISelAddressMode Backup = AM; 648 AM.GV = G->getGlobal(); 649 AM.SymbolFlags = G->getTargetFlags(); 650 if (FoldOffsetIntoAddress(G->getOffset(), AM)) { 651 AM = Backup; 652 return true; 653 } 654 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) { 655 X86ISelAddressMode Backup = AM; 656 AM.CP = CP->getConstVal(); 657 AM.Align = CP->getAlignment(); 658 AM.SymbolFlags = CP->getTargetFlags(); 659 if (FoldOffsetIntoAddress(CP->getOffset(), AM)) { 660 AM = Backup; 661 return true; 662 } 663 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) { 664 AM.ES = S->getSymbol(); 665 AM.SymbolFlags = S->getTargetFlags(); 666 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) { 667 AM.JT = J->getIndex(); 668 AM.SymbolFlags = J->getTargetFlags(); 669 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) { 670 X86ISelAddressMode Backup = AM; 671 AM.BlockAddr = BA->getBlockAddress(); 672 AM.SymbolFlags = BA->getTargetFlags(); 673 if (FoldOffsetIntoAddress(BA->getOffset(), AM)) { 674 AM = Backup; 675 return true; 676 } 677 } else 678 llvm_unreachable("Unhandled symbol reference node."); 679 680 if (N.getOpcode() == X86ISD::WrapperRIP) 681 AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64)); 682 return false; 683 } 684 685 // Handle the case when globals fit in our immediate field: This is true for 686 // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit 687 // mode, this only applies to a non-RIP-relative computation. 688 if (!Subtarget->is64Bit() || 689 M == CodeModel::Small || M == CodeModel::Kernel) { 690 assert(N.getOpcode() != X86ISD::WrapperRIP && 691 "RIP-relative addressing already handled"); 692 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) { 693 AM.GV = G->getGlobal(); 694 AM.Disp += G->getOffset(); 695 AM.SymbolFlags = G->getTargetFlags(); 696 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) { 697 AM.CP = CP->getConstVal(); 698 AM.Align = CP->getAlignment(); 699 AM.Disp += CP->getOffset(); 700 AM.SymbolFlags = CP->getTargetFlags(); 701 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) { 702 AM.ES = S->getSymbol(); 703 AM.SymbolFlags = S->getTargetFlags(); 704 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) { 705 AM.JT = J->getIndex(); 706 AM.SymbolFlags = J->getTargetFlags(); 707 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) { 708 AM.BlockAddr = BA->getBlockAddress(); 709 AM.Disp += BA->getOffset(); 710 AM.SymbolFlags = BA->getTargetFlags(); 711 } else 712 llvm_unreachable("Unhandled symbol reference node."); 713 return false; 714 } 715 716 return true; 717} 718 719/// MatchAddress - Add the specified node to the specified addressing mode, 720/// returning true if it cannot be done. This just pattern matches for the 721/// addressing mode. 722bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) { 723 if (MatchAddressRecursively(N, AM, 0)) 724 return true; 725 726 // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has 727 // a smaller encoding and avoids a scaled-index. 728 if (AM.Scale == 2 && 729 AM.BaseType == X86ISelAddressMode::RegBase && 730 AM.Base_Reg.getNode() == 0) { 731 AM.Base_Reg = AM.IndexReg; 732 AM.Scale = 1; 733 } 734 735 // Post-processing: Convert foo to foo(%rip), even in non-PIC mode, 736 // because it has a smaller encoding. 737 // TODO: Which other code models can use this? 738 if (TM.getCodeModel() == CodeModel::Small && 739 Subtarget->is64Bit() && 740 AM.Scale == 1 && 741 AM.BaseType == X86ISelAddressMode::RegBase && 742 AM.Base_Reg.getNode() == 0 && 743 AM.IndexReg.getNode() == 0 && 744 AM.SymbolFlags == X86II::MO_NO_FLAG && 745 AM.hasSymbolicDisplacement()) 746 AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64); 747 748 return false; 749} 750 751// Insert a node into the DAG at least before the Pos node's position. This 752// will reposition the node as needed, and will assign it a node ID that is <= 753// the Pos node's ID. Note that this does *not* preserve the uniqueness of node 754// IDs! The selection DAG must no longer depend on their uniqueness when this 755// is used. 756static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) { 757 if (N.getNode()->getNodeId() == -1 || 758 N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) { 759 DAG.RepositionNode(Pos.getNode(), N.getNode()); 760 N.getNode()->setNodeId(Pos.getNode()->getNodeId()); 761 } 762} 763 764// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff)" if safe. This 765// allows us to convert the shift and and into an h-register extract and 766// a scaled index. Returns false if the simplification is performed. 767static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N, 768 uint64_t Mask, 769 SDValue Shift, SDValue X, 770 X86ISelAddressMode &AM) { 771 if (Shift.getOpcode() != ISD::SRL || 772 !isa<ConstantSDNode>(Shift.getOperand(1)) || 773 !Shift.hasOneUse()) 774 return true; 775 776 int ScaleLog = 8 - Shift.getConstantOperandVal(1); 777 if (ScaleLog <= 0 || ScaleLog >= 4 || 778 Mask != (0xffu << ScaleLog)) 779 return true; 780 781 EVT VT = N.getValueType(); 782 DebugLoc DL = N.getDebugLoc(); 783 SDValue Eight = DAG.getConstant(8, MVT::i8); 784 SDValue NewMask = DAG.getConstant(0xff, VT); 785 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight); 786 SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask); 787 SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8); 788 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount); 789 790 // Insert the new nodes into the topological ordering. We must do this in 791 // a valid topological ordering as nothing is going to go back and re-sort 792 // these nodes. We continually insert before 'N' in sequence as this is 793 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no 794 // hierarchy left to express. 795 InsertDAGNode(DAG, N, Eight); 796 InsertDAGNode(DAG, N, Srl); 797 InsertDAGNode(DAG, N, NewMask); 798 InsertDAGNode(DAG, N, And); 799 InsertDAGNode(DAG, N, ShlCount); 800 InsertDAGNode(DAG, N, Shl); 801 DAG.ReplaceAllUsesWith(N, Shl); 802 AM.IndexReg = And; 803 AM.Scale = (1 << ScaleLog); 804 return false; 805} 806 807// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this 808// allows us to fold the shift into this addressing mode. Returns false if the 809// transform succeeded. 810static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N, 811 uint64_t Mask, 812 SDValue Shift, SDValue X, 813 X86ISelAddressMode &AM) { 814 if (Shift.getOpcode() != ISD::SHL || 815 !isa<ConstantSDNode>(Shift.getOperand(1))) 816 return true; 817 818 // Not likely to be profitable if either the AND or SHIFT node has more 819 // than one use (unless all uses are for address computation). Besides, 820 // isel mechanism requires their node ids to be reused. 821 if (!N.hasOneUse() || !Shift.hasOneUse()) 822 return true; 823 824 // Verify that the shift amount is something we can fold. 825 unsigned ShiftAmt = Shift.getConstantOperandVal(1); 826 if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3) 827 return true; 828 829 EVT VT = N.getValueType(); 830 DebugLoc DL = N.getDebugLoc(); 831 SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT); 832 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask); 833 SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1)); 834 835 // Insert the new nodes into the topological ordering. We must do this in 836 // a valid topological ordering as nothing is going to go back and re-sort 837 // these nodes. We continually insert before 'N' in sequence as this is 838 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no 839 // hierarchy left to express. 840 InsertDAGNode(DAG, N, NewMask); 841 InsertDAGNode(DAG, N, NewAnd); 842 InsertDAGNode(DAG, N, NewShift); 843 DAG.ReplaceAllUsesWith(N, NewShift); 844 845 AM.Scale = 1 << ShiftAmt; 846 AM.IndexReg = NewAnd; 847 return false; 848} 849 850// Implement some heroics to detect shifts of masked values where the mask can 851// be replaced by extending the shift and undoing that in the addressing mode 852// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and 853// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in 854// the addressing mode. This results in code such as: 855// 856// int f(short *y, int *lookup_table) { 857// ... 858// return *y + lookup_table[*y >> 11]; 859// } 860// 861// Turning into: 862// movzwl (%rdi), %eax 863// movl %eax, %ecx 864// shrl $11, %ecx 865// addl (%rsi,%rcx,4), %eax 866// 867// Instead of: 868// movzwl (%rdi), %eax 869// movl %eax, %ecx 870// shrl $9, %ecx 871// andl $124, %rcx 872// addl (%rsi,%rcx), %eax 873// 874// Note that this function assumes the mask is provided as a mask *after* the 875// value is shifted. The input chain may or may not match that, but computing 876// such a mask is trivial. 877static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N, 878 uint64_t Mask, 879 SDValue Shift, SDValue X, 880 X86ISelAddressMode &AM) { 881 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() || 882 !isa<ConstantSDNode>(Shift.getOperand(1))) 883 return true; 884 885 unsigned ShiftAmt = Shift.getConstantOperandVal(1); 886 unsigned MaskLZ = CountLeadingZeros_64(Mask); 887 unsigned MaskTZ = CountTrailingZeros_64(Mask); 888 889 // The amount of shift we're trying to fit into the addressing mode is taken 890 // from the trailing zeros of the mask. 891 unsigned AMShiftAmt = MaskTZ; 892 893 // There is nothing we can do here unless the mask is removing some bits. 894 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits. 895 if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true; 896 897 // We also need to ensure that mask is a continuous run of bits. 898 if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true; 899 900 // Scale the leading zero count down based on the actual size of the value. 901 // Also scale it down based on the size of the shift. 902 MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt; 903 904 // The final check is to ensure that any masked out high bits of X are 905 // already known to be zero. Otherwise, the mask has a semantic impact 906 // other than masking out a couple of low bits. Unfortunately, because of 907 // the mask, zero extensions will be removed from operands in some cases. 908 // This code works extra hard to look through extensions because we can 909 // replace them with zero extensions cheaply if necessary. 910 bool ReplacingAnyExtend = false; 911 if (X.getOpcode() == ISD::ANY_EXTEND) { 912 unsigned ExtendBits = 913 X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits(); 914 // Assume that we'll replace the any-extend with a zero-extend, and 915 // narrow the search to the extended value. 916 X = X.getOperand(0); 917 MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits; 918 ReplacingAnyExtend = true; 919 } 920 APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(), 921 MaskLZ); 922 APInt KnownZero, KnownOne; 923 DAG.ComputeMaskedBits(X, KnownZero, KnownOne); 924 if (MaskedHighBits != KnownZero) return true; 925 926 // We've identified a pattern that can be transformed into a single shift 927 // and an addressing mode. Make it so. 928 EVT VT = N.getValueType(); 929 if (ReplacingAnyExtend) { 930 assert(X.getValueType() != VT); 931 // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND. 932 SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X); 933 InsertDAGNode(DAG, N, NewX); 934 X = NewX; 935 } 936 DebugLoc DL = N.getDebugLoc(); 937 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8); 938 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt); 939 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8); 940 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt); 941 942 // Insert the new nodes into the topological ordering. We must do this in 943 // a valid topological ordering as nothing is going to go back and re-sort 944 // these nodes. We continually insert before 'N' in sequence as this is 945 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no 946 // hierarchy left to express. 947 InsertDAGNode(DAG, N, NewSRLAmt); 948 InsertDAGNode(DAG, N, NewSRL); 949 InsertDAGNode(DAG, N, NewSHLAmt); 950 InsertDAGNode(DAG, N, NewSHL); 951 DAG.ReplaceAllUsesWith(N, NewSHL); 952 953 AM.Scale = 1 << AMShiftAmt; 954 AM.IndexReg = NewSRL; 955 return false; 956} 957 958bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, 959 unsigned Depth) { 960 DebugLoc dl = N.getDebugLoc(); 961 DEBUG({ 962 dbgs() << "MatchAddress: "; 963 AM.dump(); 964 }); 965 // Limit recursion. 966 if (Depth > 5) 967 return MatchAddressBase(N, AM); 968 969 // If this is already a %rip relative address, we can only merge immediates 970 // into it. Instead of handling this in every case, we handle it here. 971 // RIP relative addressing: %rip + 32-bit displacement! 972 if (AM.isRIPRelative()) { 973 // FIXME: JumpTable and ExternalSymbol address currently don't like 974 // displacements. It isn't very important, but this should be fixed for 975 // consistency. 976 if (!AM.ES && AM.JT != -1) return true; 977 978 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) 979 if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM)) 980 return false; 981 return true; 982 } 983 984 switch (N.getOpcode()) { 985 default: break; 986 case ISD::Constant: { 987 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue(); 988 if (!FoldOffsetIntoAddress(Val, AM)) 989 return false; 990 break; 991 } 992 993 case X86ISD::Wrapper: 994 case X86ISD::WrapperRIP: 995 if (!MatchWrapper(N, AM)) 996 return false; 997 break; 998 999 case ISD::LOAD: 1000 if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM)) 1001 return false; 1002 break; 1003 1004 case ISD::FrameIndex: 1005 if (AM.BaseType == X86ISelAddressMode::RegBase && 1006 AM.Base_Reg.getNode() == 0 && 1007 (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) { 1008 AM.BaseType = X86ISelAddressMode::FrameIndexBase; 1009 AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex(); 1010 return false; 1011 } 1012 break; 1013 1014 case ISD::SHL: 1015 if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) 1016 break; 1017 1018 if (ConstantSDNode 1019 *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) { 1020 unsigned Val = CN->getZExtValue(); 1021 // Note that we handle x<<1 as (,x,2) rather than (x,x) here so 1022 // that the base operand remains free for further matching. If 1023 // the base doesn't end up getting used, a post-processing step 1024 // in MatchAddress turns (,x,2) into (x,x), which is cheaper. 1025 if (Val == 1 || Val == 2 || Val == 3) { 1026 AM.Scale = 1 << Val; 1027 SDValue ShVal = N.getNode()->getOperand(0); 1028 1029 // Okay, we know that we have a scale by now. However, if the scaled 1030 // value is an add of something and a constant, we can fold the 1031 // constant into the disp field here. 1032 if (CurDAG->isBaseWithConstantOffset(ShVal)) { 1033 AM.IndexReg = ShVal.getNode()->getOperand(0); 1034 ConstantSDNode *AddVal = 1035 cast<ConstantSDNode>(ShVal.getNode()->getOperand(1)); 1036 uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val; 1037 if (!FoldOffsetIntoAddress(Disp, AM)) 1038 return false; 1039 } 1040 1041 AM.IndexReg = ShVal; 1042 return false; 1043 } 1044 break; 1045 } 1046 1047 case ISD::SRL: { 1048 // Scale must not be used already. 1049 if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break; 1050 1051 SDValue And = N.getOperand(0); 1052 if (And.getOpcode() != ISD::AND) break; 1053 SDValue X = And.getOperand(0); 1054 1055 // We only handle up to 64-bit values here as those are what matter for 1056 // addressing mode optimizations. 1057 if (X.getValueSizeInBits() > 64) break; 1058 1059 // The mask used for the transform is expected to be post-shift, but we 1060 // found the shift first so just apply the shift to the mask before passing 1061 // it down. 1062 if (!isa<ConstantSDNode>(N.getOperand(1)) || 1063 !isa<ConstantSDNode>(And.getOperand(1))) 1064 break; 1065 uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1); 1066 1067 // Try to fold the mask and shift into the scale, and return false if we 1068 // succeed. 1069 if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM)) 1070 return false; 1071 break; 1072 } 1073 1074 case ISD::SMUL_LOHI: 1075 case ISD::UMUL_LOHI: 1076 // A mul_lohi where we need the low part can be folded as a plain multiply. 1077 if (N.getResNo() != 0) break; 1078 // FALL THROUGH 1079 case ISD::MUL: 1080 case X86ISD::MUL_IMM: 1081 // X*[3,5,9] -> X+X*[2,4,8] 1082 if (AM.BaseType == X86ISelAddressMode::RegBase && 1083 AM.Base_Reg.getNode() == 0 && 1084 AM.IndexReg.getNode() == 0) { 1085 if (ConstantSDNode 1086 *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) 1087 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 || 1088 CN->getZExtValue() == 9) { 1089 AM.Scale = unsigned(CN->getZExtValue())-1; 1090 1091 SDValue MulVal = N.getNode()->getOperand(0); 1092 SDValue Reg; 1093 1094 // Okay, we know that we have a scale by now. However, if the scaled 1095 // value is an add of something and a constant, we can fold the 1096 // constant into the disp field here. 1097 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() && 1098 isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) { 1099 Reg = MulVal.getNode()->getOperand(0); 1100 ConstantSDNode *AddVal = 1101 cast<ConstantSDNode>(MulVal.getNode()->getOperand(1)); 1102 uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue(); 1103 if (FoldOffsetIntoAddress(Disp, AM)) 1104 Reg = N.getNode()->getOperand(0); 1105 } else { 1106 Reg = N.getNode()->getOperand(0); 1107 } 1108 1109 AM.IndexReg = AM.Base_Reg = Reg; 1110 return false; 1111 } 1112 } 1113 break; 1114 1115 case ISD::SUB: { 1116 // Given A-B, if A can be completely folded into the address and 1117 // the index field with the index field unused, use -B as the index. 1118 // This is a win if a has multiple parts that can be folded into 1119 // the address. Also, this saves a mov if the base register has 1120 // other uses, since it avoids a two-address sub instruction, however 1121 // it costs an additional mov if the index register has other uses. 1122 1123 // Add an artificial use to this node so that we can keep track of 1124 // it if it gets CSE'd with a different node. 1125 HandleSDNode Handle(N); 1126 1127 // Test if the LHS of the sub can be folded. 1128 X86ISelAddressMode Backup = AM; 1129 if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) { 1130 AM = Backup; 1131 break; 1132 } 1133 // Test if the index field is free for use. 1134 if (AM.IndexReg.getNode() || AM.isRIPRelative()) { 1135 AM = Backup; 1136 break; 1137 } 1138 1139 int Cost = 0; 1140 SDValue RHS = Handle.getValue().getNode()->getOperand(1); 1141 // If the RHS involves a register with multiple uses, this 1142 // transformation incurs an extra mov, due to the neg instruction 1143 // clobbering its operand. 1144 if (!RHS.getNode()->hasOneUse() || 1145 RHS.getNode()->getOpcode() == ISD::CopyFromReg || 1146 RHS.getNode()->getOpcode() == ISD::TRUNCATE || 1147 RHS.getNode()->getOpcode() == ISD::ANY_EXTEND || 1148 (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND && 1149 RHS.getNode()->getOperand(0).getValueType() == MVT::i32)) 1150 ++Cost; 1151 // If the base is a register with multiple uses, this 1152 // transformation may save a mov. 1153 if ((AM.BaseType == X86ISelAddressMode::RegBase && 1154 AM.Base_Reg.getNode() && 1155 !AM.Base_Reg.getNode()->hasOneUse()) || 1156 AM.BaseType == X86ISelAddressMode::FrameIndexBase) 1157 --Cost; 1158 // If the folded LHS was interesting, this transformation saves 1159 // address arithmetic. 1160 if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) + 1161 ((AM.Disp != 0) && (Backup.Disp == 0)) + 1162 (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2) 1163 --Cost; 1164 // If it doesn't look like it may be an overall win, don't do it. 1165 if (Cost >= 0) { 1166 AM = Backup; 1167 break; 1168 } 1169 1170 // Ok, the transformation is legal and appears profitable. Go for it. 1171 SDValue Zero = CurDAG->getConstant(0, N.getValueType()); 1172 SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS); 1173 AM.IndexReg = Neg; 1174 AM.Scale = 1; 1175 1176 // Insert the new nodes into the topological ordering. 1177 InsertDAGNode(*CurDAG, N, Zero); 1178 InsertDAGNode(*CurDAG, N, Neg); 1179 return false; 1180 } 1181 1182 case ISD::ADD: { 1183 // Add an artificial use to this node so that we can keep track of 1184 // it if it gets CSE'd with a different node. 1185 HandleSDNode Handle(N); 1186 1187 X86ISelAddressMode Backup = AM; 1188 if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) && 1189 !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)) 1190 return false; 1191 AM = Backup; 1192 1193 // Try again after commuting the operands. 1194 if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&& 1195 !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1)) 1196 return false; 1197 AM = Backup; 1198 1199 // If we couldn't fold both operands into the address at the same time, 1200 // see if we can just put each operand into a register and fold at least 1201 // the add. 1202 if (AM.BaseType == X86ISelAddressMode::RegBase && 1203 !AM.Base_Reg.getNode() && 1204 !AM.IndexReg.getNode()) { 1205 N = Handle.getValue(); 1206 AM.Base_Reg = N.getOperand(0); 1207 AM.IndexReg = N.getOperand(1); 1208 AM.Scale = 1; 1209 return false; 1210 } 1211 N = Handle.getValue(); 1212 break; 1213 } 1214 1215 case ISD::OR: 1216 // Handle "X | C" as "X + C" iff X is known to have C bits clear. 1217 if (CurDAG->isBaseWithConstantOffset(N)) { 1218 X86ISelAddressMode Backup = AM; 1219 ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1)); 1220 1221 // Start with the LHS as an addr mode. 1222 if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) && 1223 !FoldOffsetIntoAddress(CN->getSExtValue(), AM)) 1224 return false; 1225 AM = Backup; 1226 } 1227 break; 1228 1229 case ISD::AND: { 1230 // Perform some heroic transforms on an and of a constant-count shift 1231 // with a constant to enable use of the scaled offset field. 1232 1233 // Scale must not be used already. 1234 if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break; 1235 1236 SDValue Shift = N.getOperand(0); 1237 if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break; 1238 SDValue X = Shift.getOperand(0); 1239 1240 // We only handle up to 64-bit values here as those are what matter for 1241 // addressing mode optimizations. 1242 if (X.getValueSizeInBits() > 64) break; 1243 1244 if (!isa<ConstantSDNode>(N.getOperand(1))) 1245 break; 1246 uint64_t Mask = N.getConstantOperandVal(1); 1247 1248 // Try to fold the mask and shift into an extract and scale. 1249 if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM)) 1250 return false; 1251 1252 // Try to fold the mask and shift directly into the scale. 1253 if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM)) 1254 return false; 1255 1256 // Try to swap the mask and shift to place shifts which can be done as 1257 // a scale on the outside of the mask. 1258 if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM)) 1259 return false; 1260 break; 1261 } 1262 } 1263 1264 return MatchAddressBase(N, AM); 1265} 1266 1267/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the 1268/// specified addressing mode without any further recursion. 1269bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) { 1270 // Is the base register already occupied? 1271 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) { 1272 // If so, check to see if the scale index register is set. 1273 if (AM.IndexReg.getNode() == 0) { 1274 AM.IndexReg = N; 1275 AM.Scale = 1; 1276 return false; 1277 } 1278 1279 // Otherwise, we cannot select it. 1280 return true; 1281 } 1282 1283 // Default, generate it as a register. 1284 AM.BaseType = X86ISelAddressMode::RegBase; 1285 AM.Base_Reg = N; 1286 return false; 1287} 1288 1289/// SelectAddr - returns true if it is able pattern match an addressing mode. 1290/// It returns the operands which make up the maximal addressing mode it can 1291/// match by reference. 1292/// 1293/// Parent is the parent node of the addr operand that is being matched. It 1294/// is always a load, store, atomic node, or null. It is only null when 1295/// checking memory operands for inline asm nodes. 1296bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, 1297 SDValue &Scale, SDValue &Index, 1298 SDValue &Disp, SDValue &Segment) { 1299 X86ISelAddressMode AM; 1300 1301 if (Parent && 1302 // This list of opcodes are all the nodes that have an "addr:$ptr" operand 1303 // that are not a MemSDNode, and thus don't have proper addrspace info. 1304 Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme 1305 Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores 1306 Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme 1307 unsigned AddrSpace = 1308 cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace(); 1309 // AddrSpace 256 -> GS, 257 -> FS. 1310 if (AddrSpace == 256) 1311 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16); 1312 if (AddrSpace == 257) 1313 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16); 1314 } 1315 1316 if (MatchAddress(N, AM)) 1317 return false; 1318 1319 EVT VT = N.getValueType(); 1320 if (AM.BaseType == X86ISelAddressMode::RegBase) { 1321 if (!AM.Base_Reg.getNode()) 1322 AM.Base_Reg = CurDAG->getRegister(0, VT); 1323 } 1324 1325 if (!AM.IndexReg.getNode()) 1326 AM.IndexReg = CurDAG->getRegister(0, VT); 1327 1328 getAddressOperands(AM, Base, Scale, Index, Disp, Segment); 1329 return true; 1330} 1331 1332/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to 1333/// match a load whose top elements are either undef or zeros. The load flavor 1334/// is derived from the type of N, which is either v4f32 or v2f64. 1335/// 1336/// We also return: 1337/// PatternChainNode: this is the matched node that has a chain input and 1338/// output. 1339bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root, 1340 SDValue N, SDValue &Base, 1341 SDValue &Scale, SDValue &Index, 1342 SDValue &Disp, SDValue &Segment, 1343 SDValue &PatternNodeWithChain) { 1344 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) { 1345 PatternNodeWithChain = N.getOperand(0); 1346 if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) && 1347 PatternNodeWithChain.hasOneUse() && 1348 IsProfitableToFold(N.getOperand(0), N.getNode(), Root) && 1349 IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) { 1350 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain); 1351 if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment)) 1352 return false; 1353 return true; 1354 } 1355 } 1356 1357 // Also handle the case where we explicitly require zeros in the top 1358 // elements. This is a vector shuffle from the zero vector. 1359 if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() && 1360 // Check to see if the top elements are all zeros (or bitcast of zeros). 1361 N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR && 1362 N.getOperand(0).getNode()->hasOneUse() && 1363 ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) && 1364 N.getOperand(0).getOperand(0).hasOneUse() && 1365 IsProfitableToFold(N.getOperand(0), N.getNode(), Root) && 1366 IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) { 1367 // Okay, this is a zero extending load. Fold it. 1368 LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0)); 1369 if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment)) 1370 return false; 1371 PatternNodeWithChain = SDValue(LD, 0); 1372 return true; 1373 } 1374 return false; 1375} 1376 1377 1378/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing 1379/// mode it matches can be cost effectively emitted as an LEA instruction. 1380bool X86DAGToDAGISel::SelectLEAAddr(SDValue N, 1381 SDValue &Base, SDValue &Scale, 1382 SDValue &Index, SDValue &Disp, 1383 SDValue &Segment) { 1384 X86ISelAddressMode AM; 1385 1386 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support 1387 // segments. 1388 SDValue Copy = AM.Segment; 1389 SDValue T = CurDAG->getRegister(0, MVT::i32); 1390 AM.Segment = T; 1391 if (MatchAddress(N, AM)) 1392 return false; 1393 assert (T == AM.Segment); 1394 AM.Segment = Copy; 1395 1396 EVT VT = N.getValueType(); 1397 unsigned Complexity = 0; 1398 if (AM.BaseType == X86ISelAddressMode::RegBase) 1399 if (AM.Base_Reg.getNode()) 1400 Complexity = 1; 1401 else 1402 AM.Base_Reg = CurDAG->getRegister(0, VT); 1403 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase) 1404 Complexity = 4; 1405 1406 if (AM.IndexReg.getNode()) 1407 Complexity++; 1408 else 1409 AM.IndexReg = CurDAG->getRegister(0, VT); 1410 1411 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with 1412 // a simple shift. 1413 if (AM.Scale > 1) 1414 Complexity++; 1415 1416 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA 1417 // to a LEA. This is determined with some expermentation but is by no means 1418 // optimal (especially for code size consideration). LEA is nice because of 1419 // its three-address nature. Tweak the cost function again when we can run 1420 // convertToThreeAddress() at register allocation time. 1421 if (AM.hasSymbolicDisplacement()) { 1422 // For X86-64, we should always use lea to materialize RIP relative 1423 // addresses. 1424 if (Subtarget->is64Bit()) 1425 Complexity = 4; 1426 else 1427 Complexity += 2; 1428 } 1429 1430 if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode())) 1431 Complexity++; 1432 1433 // If it isn't worth using an LEA, reject it. 1434 if (Complexity <= 2) 1435 return false; 1436 1437 getAddressOperands(AM, Base, Scale, Index, Disp, Segment); 1438 return true; 1439} 1440 1441/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes. 1442bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base, 1443 SDValue &Scale, SDValue &Index, 1444 SDValue &Disp, SDValue &Segment) { 1445 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress); 1446 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N); 1447 1448 X86ISelAddressMode AM; 1449 AM.GV = GA->getGlobal(); 1450 AM.Disp += GA->getOffset(); 1451 AM.Base_Reg = CurDAG->getRegister(0, N.getValueType()); 1452 AM.SymbolFlags = GA->getTargetFlags(); 1453 1454 if (N.getValueType() == MVT::i32) { 1455 AM.Scale = 1; 1456 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32); 1457 } else { 1458 AM.IndexReg = CurDAG->getRegister(0, MVT::i64); 1459 } 1460 1461 getAddressOperands(AM, Base, Scale, Index, Disp, Segment); 1462 return true; 1463} 1464 1465 1466bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N, 1467 SDValue &Base, SDValue &Scale, 1468 SDValue &Index, SDValue &Disp, 1469 SDValue &Segment) { 1470 if (!ISD::isNON_EXTLoad(N.getNode()) || 1471 !IsProfitableToFold(N, P, P) || 1472 !IsLegalToFold(N, P, P, OptLevel)) 1473 return false; 1474 1475 return SelectAddr(N.getNode(), 1476 N.getOperand(1), Base, Scale, Index, Disp, Segment); 1477} 1478 1479/// getGlobalBaseReg - Return an SDNode that returns the value of 1480/// the global base register. Output instructions required to 1481/// initialize the global base register, if necessary. 1482/// 1483SDNode *X86DAGToDAGISel::getGlobalBaseReg() { 1484 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); 1485 return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode(); 1486} 1487 1488SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { 1489 SDValue Chain = Node->getOperand(0); 1490 SDValue In1 = Node->getOperand(1); 1491 SDValue In2L = Node->getOperand(2); 1492 SDValue In2H = Node->getOperand(3); 1493 1494 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 1495 if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) 1496 return NULL; 1497 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1498 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); 1499 const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain}; 1500 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), 1501 MVT::i32, MVT::i32, MVT::Other, Ops, 1502 array_lengthof(Ops)); 1503 cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1); 1504 return ResNode; 1505} 1506 1507/// Atomic opcode table 1508/// 1509enum AtomicOpc { 1510 ADD, 1511 SUB, 1512 INC, 1513 DEC, 1514 OR, 1515 AND, 1516 XOR, 1517 AtomicOpcEnd 1518}; 1519 1520enum AtomicSz { 1521 ConstantI8, 1522 I8, 1523 SextConstantI16, 1524 ConstantI16, 1525 I16, 1526 SextConstantI32, 1527 ConstantI32, 1528 I32, 1529 SextConstantI64, 1530 ConstantI64, 1531 I64, 1532 AtomicSzEnd 1533}; 1534 1535static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = { 1536 { 1537 X86::LOCK_ADD8mi, 1538 X86::LOCK_ADD8mr, 1539 X86::LOCK_ADD16mi8, 1540 X86::LOCK_ADD16mi, 1541 X86::LOCK_ADD16mr, 1542 X86::LOCK_ADD32mi8, 1543 X86::LOCK_ADD32mi, 1544 X86::LOCK_ADD32mr, 1545 X86::LOCK_ADD64mi8, 1546 X86::LOCK_ADD64mi32, 1547 X86::LOCK_ADD64mr, 1548 }, 1549 { 1550 X86::LOCK_SUB8mi, 1551 X86::LOCK_SUB8mr, 1552 X86::LOCK_SUB16mi8, 1553 X86::LOCK_SUB16mi, 1554 X86::LOCK_SUB16mr, 1555 X86::LOCK_SUB32mi8, 1556 X86::LOCK_SUB32mi, 1557 X86::LOCK_SUB32mr, 1558 X86::LOCK_SUB64mi8, 1559 X86::LOCK_SUB64mi32, 1560 X86::LOCK_SUB64mr, 1561 }, 1562 { 1563 0, 1564 X86::LOCK_INC8m, 1565 0, 1566 0, 1567 X86::LOCK_INC16m, 1568 0, 1569 0, 1570 X86::LOCK_INC32m, 1571 0, 1572 0, 1573 X86::LOCK_INC64m, 1574 }, 1575 { 1576 0, 1577 X86::LOCK_DEC8m, 1578 0, 1579 0, 1580 X86::LOCK_DEC16m, 1581 0, 1582 0, 1583 X86::LOCK_DEC32m, 1584 0, 1585 0, 1586 X86::LOCK_DEC64m, 1587 }, 1588 { 1589 X86::LOCK_OR8mi, 1590 X86::LOCK_OR8mr, 1591 X86::LOCK_OR16mi8, 1592 X86::LOCK_OR16mi, 1593 X86::LOCK_OR16mr, 1594 X86::LOCK_OR32mi8, 1595 X86::LOCK_OR32mi, 1596 X86::LOCK_OR32mr, 1597 X86::LOCK_OR64mi8, 1598 X86::LOCK_OR64mi32, 1599 X86::LOCK_OR64mr, 1600 }, 1601 { 1602 X86::LOCK_AND8mi, 1603 X86::LOCK_AND8mr, 1604 X86::LOCK_AND16mi8, 1605 X86::LOCK_AND16mi, 1606 X86::LOCK_AND16mr, 1607 X86::LOCK_AND32mi8, 1608 X86::LOCK_AND32mi, 1609 X86::LOCK_AND32mr, 1610 X86::LOCK_AND64mi8, 1611 X86::LOCK_AND64mi32, 1612 X86::LOCK_AND64mr, 1613 }, 1614 { 1615 X86::LOCK_XOR8mi, 1616 X86::LOCK_XOR8mr, 1617 X86::LOCK_XOR16mi8, 1618 X86::LOCK_XOR16mi, 1619 X86::LOCK_XOR16mr, 1620 X86::LOCK_XOR32mi8, 1621 X86::LOCK_XOR32mi, 1622 X86::LOCK_XOR32mr, 1623 X86::LOCK_XOR64mi8, 1624 X86::LOCK_XOR64mi32, 1625 X86::LOCK_XOR64mr, 1626 } 1627}; 1628 1629// Return the target constant operand for atomic-load-op and do simple 1630// translations, such as from atomic-load-add to lock-sub. The return value is 1631// one of the following 3 cases: 1632// + target-constant, the operand could be supported as a target constant. 1633// + empty, the operand is not needed any more with the new op selected. 1634// + non-empty, otherwise. 1635static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG, 1636 DebugLoc dl, 1637 enum AtomicOpc &Op, EVT NVT, 1638 SDValue Val) { 1639 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) { 1640 int64_t CNVal = CN->getSExtValue(); 1641 // Quit if not 32-bit imm. 1642 if ((int32_t)CNVal != CNVal) 1643 return Val; 1644 // For atomic-load-add, we could do some optimizations. 1645 if (Op == ADD) { 1646 // Translate to INC/DEC if ADD by 1 or -1. 1647 if ((CNVal == 1) || (CNVal == -1)) { 1648 Op = (CNVal == 1) ? INC : DEC; 1649 // No more constant operand after being translated into INC/DEC. 1650 return SDValue(); 1651 } 1652 // Translate to SUB if ADD by negative value. 1653 if (CNVal < 0) { 1654 Op = SUB; 1655 CNVal = -CNVal; 1656 } 1657 } 1658 return CurDAG->getTargetConstant(CNVal, NVT); 1659 } 1660 1661 // If the value operand is single-used, try to optimize it. 1662 if (Op == ADD && Val.hasOneUse()) { 1663 // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x). 1664 if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) { 1665 Op = SUB; 1666 return Val.getOperand(1); 1667 } 1668 // A special case for i16, which needs truncating as, in most cases, it's 1669 // promoted to i32. We will translate 1670 // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x)) 1671 if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 && 1672 Val.getOperand(0).getOpcode() == ISD::SUB && 1673 X86::isZeroNode(Val.getOperand(0).getOperand(0))) { 1674 Op = SUB; 1675 Val = Val.getOperand(0); 1676 return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT, 1677 Val.getOperand(1)); 1678 } 1679 } 1680 1681 return Val; 1682} 1683 1684SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { 1685 if (Node->hasAnyUseOfValue(0)) 1686 return 0; 1687 1688 DebugLoc dl = Node->getDebugLoc(); 1689 1690 // Optimize common patterns for __sync_or_and_fetch and similar arith 1691 // operations where the result is not used. This allows us to use the "lock" 1692 // version of the arithmetic instruction. 1693 SDValue Chain = Node->getOperand(0); 1694 SDValue Ptr = Node->getOperand(1); 1695 SDValue Val = Node->getOperand(2); 1696 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 1697 if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) 1698 return 0; 1699 1700 // Which index into the table. 1701 enum AtomicOpc Op; 1702 switch (Node->getOpcode()) { 1703 default: 1704 return 0; 1705 case ISD::ATOMIC_LOAD_OR: 1706 Op = OR; 1707 break; 1708 case ISD::ATOMIC_LOAD_AND: 1709 Op = AND; 1710 break; 1711 case ISD::ATOMIC_LOAD_XOR: 1712 Op = XOR; 1713 break; 1714 case ISD::ATOMIC_LOAD_ADD: 1715 Op = ADD; 1716 break; 1717 } 1718 1719 Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val); 1720 bool isUnOp = !Val.getNode(); 1721 bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant); 1722 1723 unsigned Opc = 0; 1724 switch (NVT.getSimpleVT().SimpleTy) { 1725 default: return 0; 1726 case MVT::i8: 1727 if (isCN) 1728 Opc = AtomicOpcTbl[Op][ConstantI8]; 1729 else 1730 Opc = AtomicOpcTbl[Op][I8]; 1731 break; 1732 case MVT::i16: 1733 if (isCN) { 1734 if (immSext8(Val.getNode())) 1735 Opc = AtomicOpcTbl[Op][SextConstantI16]; 1736 else 1737 Opc = AtomicOpcTbl[Op][ConstantI16]; 1738 } else 1739 Opc = AtomicOpcTbl[Op][I16]; 1740 break; 1741 case MVT::i32: 1742 if (isCN) { 1743 if (immSext8(Val.getNode())) 1744 Opc = AtomicOpcTbl[Op][SextConstantI32]; 1745 else 1746 Opc = AtomicOpcTbl[Op][ConstantI32]; 1747 } else 1748 Opc = AtomicOpcTbl[Op][I32]; 1749 break; 1750 case MVT::i64: 1751 Opc = AtomicOpcTbl[Op][I64]; 1752 if (isCN) { 1753 if (immSext8(Val.getNode())) 1754 Opc = AtomicOpcTbl[Op][SextConstantI64]; 1755 else if (i64immSExt32(Val.getNode())) 1756 Opc = AtomicOpcTbl[Op][ConstantI64]; 1757 } 1758 break; 1759 } 1760 1761 assert(Opc != 0 && "Invalid arith lock transform!"); 1762 1763 SDValue Ret; 1764 SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, 1765 dl, NVT), 0); 1766 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1767 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); 1768 if (isUnOp) { 1769 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain }; 1770 Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 1771 array_lengthof(Ops)), 0); 1772 } else { 1773 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain }; 1774 Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 1775 array_lengthof(Ops)), 0); 1776 } 1777 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1); 1778 SDValue RetVals[] = { Undef, Ret }; 1779 return CurDAG->getMergeValues(RetVals, 2, dl).getNode(); 1780} 1781 1782/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has 1783/// any uses which require the SF or OF bits to be accurate. 1784static bool HasNoSignedComparisonUses(SDNode *N) { 1785 // Examine each user of the node. 1786 for (SDNode::use_iterator UI = N->use_begin(), 1787 UE = N->use_end(); UI != UE; ++UI) { 1788 // Only examine CopyToReg uses. 1789 if (UI->getOpcode() != ISD::CopyToReg) 1790 return false; 1791 // Only examine CopyToReg uses that copy to EFLAGS. 1792 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != 1793 X86::EFLAGS) 1794 return false; 1795 // Examine each user of the CopyToReg use. 1796 for (SDNode::use_iterator FlagUI = UI->use_begin(), 1797 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) { 1798 // Only examine the Flag result. 1799 if (FlagUI.getUse().getResNo() != 1) continue; 1800 // Anything unusual: assume conservatively. 1801 if (!FlagUI->isMachineOpcode()) return false; 1802 // Examine the opcode of the user. 1803 switch (FlagUI->getMachineOpcode()) { 1804 // These comparisons don't treat the most significant bit specially. 1805 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr: 1806 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr: 1807 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm: 1808 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm: 1809 case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4: 1810 case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4: 1811 case X86::CMOVA16rr: case X86::CMOVA16rm: 1812 case X86::CMOVA32rr: case X86::CMOVA32rm: 1813 case X86::CMOVA64rr: case X86::CMOVA64rm: 1814 case X86::CMOVAE16rr: case X86::CMOVAE16rm: 1815 case X86::CMOVAE32rr: case X86::CMOVAE32rm: 1816 case X86::CMOVAE64rr: case X86::CMOVAE64rm: 1817 case X86::CMOVB16rr: case X86::CMOVB16rm: 1818 case X86::CMOVB32rr: case X86::CMOVB32rm: 1819 case X86::CMOVB64rr: case X86::CMOVB64rm: 1820 case X86::CMOVBE16rr: case X86::CMOVBE16rm: 1821 case X86::CMOVBE32rr: case X86::CMOVBE32rm: 1822 case X86::CMOVBE64rr: case X86::CMOVBE64rm: 1823 case X86::CMOVE16rr: case X86::CMOVE16rm: 1824 case X86::CMOVE32rr: case X86::CMOVE32rm: 1825 case X86::CMOVE64rr: case X86::CMOVE64rm: 1826 case X86::CMOVNE16rr: case X86::CMOVNE16rm: 1827 case X86::CMOVNE32rr: case X86::CMOVNE32rm: 1828 case X86::CMOVNE64rr: case X86::CMOVNE64rm: 1829 case X86::CMOVNP16rr: case X86::CMOVNP16rm: 1830 case X86::CMOVNP32rr: case X86::CMOVNP32rm: 1831 case X86::CMOVNP64rr: case X86::CMOVNP64rm: 1832 case X86::CMOVP16rr: case X86::CMOVP16rm: 1833 case X86::CMOVP32rr: case X86::CMOVP32rm: 1834 case X86::CMOVP64rr: case X86::CMOVP64rm: 1835 continue; 1836 // Anything else: assume conservatively. 1837 default: return false; 1838 } 1839 } 1840 } 1841 return true; 1842} 1843 1844/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode 1845/// is suitable for doing the {load; increment or decrement; store} to modify 1846/// transformation. 1847static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc, 1848 SDValue StoredVal, SelectionDAG *CurDAG, 1849 LoadSDNode* &LoadNode, SDValue &InputChain) { 1850 1851 // is the value stored the result of a DEC or INC? 1852 if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false; 1853 1854 // is the stored value result 0 of the load? 1855 if (StoredVal.getResNo() != 0) return false; 1856 1857 // are there other uses of the loaded value than the inc or dec? 1858 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false; 1859 1860 // is the store non-extending and non-indexed? 1861 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal()) 1862 return false; 1863 1864 SDValue Load = StoredVal->getOperand(0); 1865 // Is the stored value a non-extending and non-indexed load? 1866 if (!ISD::isNormalLoad(Load.getNode())) return false; 1867 1868 // Return LoadNode by reference. 1869 LoadNode = cast<LoadSDNode>(Load); 1870 // is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8) 1871 EVT LdVT = LoadNode->getMemoryVT(); 1872 if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 && 1873 LdVT != MVT::i8) 1874 return false; 1875 1876 // Is store the only read of the loaded value? 1877 if (!Load.hasOneUse()) 1878 return false; 1879 1880 // Is the address of the store the same as the load? 1881 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() || 1882 LoadNode->getOffset() != StoreNode->getOffset()) 1883 return false; 1884 1885 // Check if the chain is produced by the load or is a TokenFactor with 1886 // the load output chain as an operand. Return InputChain by reference. 1887 SDValue Chain = StoreNode->getChain(); 1888 1889 bool ChainCheck = false; 1890 if (Chain == Load.getValue(1)) { 1891 ChainCheck = true; 1892 InputChain = LoadNode->getChain(); 1893 } else if (Chain.getOpcode() == ISD::TokenFactor) { 1894 SmallVector<SDValue, 4> ChainOps; 1895 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) { 1896 SDValue Op = Chain.getOperand(i); 1897 if (Op == Load.getValue(1)) { 1898 ChainCheck = true; 1899 continue; 1900 } 1901 1902 // Make sure using Op as part of the chain would not cause a cycle here. 1903 // In theory, we could check whether the chain node is a predecessor of 1904 // the load. But that can be very expensive. Instead visit the uses and 1905 // make sure they all have smaller node id than the load. 1906 int LoadId = LoadNode->getNodeId(); 1907 for (SDNode::use_iterator UI = Op.getNode()->use_begin(), 1908 UE = UI->use_end(); UI != UE; ++UI) { 1909 if (UI.getUse().getResNo() != 0) 1910 continue; 1911 if (UI->getNodeId() > LoadId) 1912 return false; 1913 } 1914 1915 ChainOps.push_back(Op); 1916 } 1917 1918 if (ChainCheck) 1919 // Make a new TokenFactor with all the other input chains except 1920 // for the load. 1921 InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(), 1922 MVT::Other, &ChainOps[0], ChainOps.size()); 1923 } 1924 if (!ChainCheck) 1925 return false; 1926 1927 return true; 1928} 1929 1930/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in memory 1931/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC. 1932static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) { 1933 if (Opc == X86ISD::DEC) { 1934 if (LdVT == MVT::i64) return X86::DEC64m; 1935 if (LdVT == MVT::i32) return X86::DEC32m; 1936 if (LdVT == MVT::i16) return X86::DEC16m; 1937 if (LdVT == MVT::i8) return X86::DEC8m; 1938 } else { 1939 assert(Opc == X86ISD::INC && "unrecognized opcode"); 1940 if (LdVT == MVT::i64) return X86::INC64m; 1941 if (LdVT == MVT::i32) return X86::INC32m; 1942 if (LdVT == MVT::i16) return X86::INC16m; 1943 if (LdVT == MVT::i8) return X86::INC8m; 1944 } 1945 llvm_unreachable("unrecognized size for LdVT"); 1946} 1947 1948/// SelectGather - Customized ISel for GATHER operations. 1949/// 1950SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) { 1951 // Operands of Gather: VSrc, Base, VIdx, VMask, Scale 1952 SDValue Chain = Node->getOperand(0); 1953 SDValue VSrc = Node->getOperand(2); 1954 SDValue Base = Node->getOperand(3); 1955 SDValue VIdx = Node->getOperand(4); 1956 SDValue VMask = Node->getOperand(5); 1957 ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6)); 1958 if (!Scale) 1959 return 0; 1960 1961 SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(), 1962 MVT::Other); 1963 1964 // Memory Operands: Base, Scale, Index, Disp, Segment 1965 SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32); 1966 SDValue Segment = CurDAG->getRegister(0, MVT::i32); 1967 const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx, 1968 Disp, Segment, VMask, Chain}; 1969 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(), 1970 VTs, Ops, array_lengthof(Ops)); 1971 // Node has 2 outputs: VDst and MVT::Other. 1972 // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other. 1973 // We replace VDst of Node with VDst of ResNode, and Other of Node with Other 1974 // of ResNode. 1975 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0)); 1976 ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2)); 1977 return ResNode; 1978} 1979 1980SDNode *X86DAGToDAGISel::Select(SDNode *Node) { 1981 EVT NVT = Node->getValueType(0); 1982 unsigned Opc, MOpc; 1983 unsigned Opcode = Node->getOpcode(); 1984 DebugLoc dl = Node->getDebugLoc(); 1985 1986 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n'); 1987 1988 if (Node->isMachineOpcode()) { 1989 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n'); 1990 return NULL; // Already selected. 1991 } 1992 1993 switch (Opcode) { 1994 default: break; 1995 case ISD::INTRINSIC_W_CHAIN: { 1996 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue(); 1997 switch (IntNo) { 1998 default: break; 1999 case Intrinsic::x86_avx2_gather_d_pd: 2000 case Intrinsic::x86_avx2_gather_d_pd_256: 2001 case Intrinsic::x86_avx2_gather_q_pd: 2002 case Intrinsic::x86_avx2_gather_q_pd_256: 2003 case Intrinsic::x86_avx2_gather_d_ps: 2004 case Intrinsic::x86_avx2_gather_d_ps_256: 2005 case Intrinsic::x86_avx2_gather_q_ps: 2006 case Intrinsic::x86_avx2_gather_q_ps_256: 2007 case Intrinsic::x86_avx2_gather_d_q: 2008 case Intrinsic::x86_avx2_gather_d_q_256: 2009 case Intrinsic::x86_avx2_gather_q_q: 2010 case Intrinsic::x86_avx2_gather_q_q_256: 2011 case Intrinsic::x86_avx2_gather_d_d: 2012 case Intrinsic::x86_avx2_gather_d_d_256: 2013 case Intrinsic::x86_avx2_gather_q_d: 2014 case Intrinsic::x86_avx2_gather_q_d_256: { 2015 unsigned Opc; 2016 switch (IntNo) { 2017 default: llvm_unreachable("Impossible intrinsic"); 2018 case Intrinsic::x86_avx2_gather_d_pd: Opc = X86::VGATHERDPDrm; break; 2019 case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break; 2020 case Intrinsic::x86_avx2_gather_q_pd: Opc = X86::VGATHERQPDrm; break; 2021 case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break; 2022 case Intrinsic::x86_avx2_gather_d_ps: Opc = X86::VGATHERDPSrm; break; 2023 case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break; 2024 case Intrinsic::x86_avx2_gather_q_ps: Opc = X86::VGATHERQPSrm; break; 2025 case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break; 2026 case Intrinsic::x86_avx2_gather_d_q: Opc = X86::VPGATHERDQrm; break; 2027 case Intrinsic::x86_avx2_gather_d_q_256: Opc = X86::VPGATHERDQYrm; break; 2028 case Intrinsic::x86_avx2_gather_q_q: Opc = X86::VPGATHERQQrm; break; 2029 case Intrinsic::x86_avx2_gather_q_q_256: Opc = X86::VPGATHERQQYrm; break; 2030 case Intrinsic::x86_avx2_gather_d_d: Opc = X86::VPGATHERDDrm; break; 2031 case Intrinsic::x86_avx2_gather_d_d_256: Opc = X86::VPGATHERDDYrm; break; 2032 case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break; 2033 case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break; 2034 } 2035 SDNode *RetVal = SelectGather(Node, Opc); 2036 if (RetVal) 2037 // We already called ReplaceUses inside SelectGather. 2038 return NULL; 2039 break; 2040 } 2041 } 2042 break; 2043 } 2044 case X86ISD::GlobalBaseReg: 2045 return getGlobalBaseReg(); 2046 2047 2048 case X86ISD::ATOMOR64_DAG: 2049 case X86ISD::ATOMXOR64_DAG: 2050 case X86ISD::ATOMADD64_DAG: 2051 case X86ISD::ATOMSUB64_DAG: 2052 case X86ISD::ATOMNAND64_DAG: 2053 case X86ISD::ATOMAND64_DAG: 2054 case X86ISD::ATOMMAX64_DAG: 2055 case X86ISD::ATOMMIN64_DAG: 2056 case X86ISD::ATOMUMAX64_DAG: 2057 case X86ISD::ATOMUMIN64_DAG: 2058 case X86ISD::ATOMSWAP64_DAG: { 2059 unsigned Opc; 2060 switch (Opcode) { 2061 default: llvm_unreachable("Impossible opcode"); 2062 case X86ISD::ATOMOR64_DAG: Opc = X86::ATOMOR6432; break; 2063 case X86ISD::ATOMXOR64_DAG: Opc = X86::ATOMXOR6432; break; 2064 case X86ISD::ATOMADD64_DAG: Opc = X86::ATOMADD6432; break; 2065 case X86ISD::ATOMSUB64_DAG: Opc = X86::ATOMSUB6432; break; 2066 case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break; 2067 case X86ISD::ATOMAND64_DAG: Opc = X86::ATOMAND6432; break; 2068 case X86ISD::ATOMMAX64_DAG: Opc = X86::ATOMMAX6432; break; 2069 case X86ISD::ATOMMIN64_DAG: Opc = X86::ATOMMIN6432; break; 2070 case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break; 2071 case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break; 2072 case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break; 2073 } 2074 SDNode *RetVal = SelectAtomic64(Node, Opc); 2075 if (RetVal) 2076 return RetVal; 2077 break; 2078 } 2079 2080 case ISD::ATOMIC_LOAD_XOR: 2081 case ISD::ATOMIC_LOAD_AND: 2082 case ISD::ATOMIC_LOAD_OR: 2083 case ISD::ATOMIC_LOAD_ADD: { 2084 SDNode *RetVal = SelectAtomicLoadArith(Node, NVT); 2085 if (RetVal) 2086 return RetVal; 2087 break; 2088 } 2089 case ISD::AND: 2090 case ISD::OR: 2091 case ISD::XOR: { 2092 // For operations of the form (x << C1) op C2, check if we can use a smaller 2093 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1. 2094 SDValue N0 = Node->getOperand(0); 2095 SDValue N1 = Node->getOperand(1); 2096 2097 if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse()) 2098 break; 2099 2100 // i8 is unshrinkable, i16 should be promoted to i32. 2101 if (NVT != MVT::i32 && NVT != MVT::i64) 2102 break; 2103 2104 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1); 2105 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 2106 if (!Cst || !ShlCst) 2107 break; 2108 2109 int64_t Val = Cst->getSExtValue(); 2110 uint64_t ShlVal = ShlCst->getZExtValue(); 2111 2112 // Make sure that we don't change the operation by removing bits. 2113 // This only matters for OR and XOR, AND is unaffected. 2114 uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1; 2115 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0) 2116 break; 2117 2118 unsigned ShlOp, Op; 2119 EVT CstVT = NVT; 2120 2121 // Check the minimum bitwidth for the new constant. 2122 // TODO: AND32ri is the same as AND64ri32 with zext imm. 2123 // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr 2124 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32. 2125 if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal)) 2126 CstVT = MVT::i8; 2127 else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal)) 2128 CstVT = MVT::i32; 2129 2130 // Bail if there is no smaller encoding. 2131 if (NVT == CstVT) 2132 break; 2133 2134 switch (NVT.getSimpleVT().SimpleTy) { 2135 default: llvm_unreachable("Unsupported VT!"); 2136 case MVT::i32: 2137 assert(CstVT == MVT::i8); 2138 ShlOp = X86::SHL32ri; 2139 2140 switch (Opcode) { 2141 default: llvm_unreachable("Impossible opcode"); 2142 case ISD::AND: Op = X86::AND32ri8; break; 2143 case ISD::OR: Op = X86::OR32ri8; break; 2144 case ISD::XOR: Op = X86::XOR32ri8; break; 2145 } 2146 break; 2147 case MVT::i64: 2148 assert(CstVT == MVT::i8 || CstVT == MVT::i32); 2149 ShlOp = X86::SHL64ri; 2150 2151 switch (Opcode) { 2152 default: llvm_unreachable("Impossible opcode"); 2153 case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break; 2154 case ISD::OR: Op = CstVT==MVT::i8? X86::OR64ri8 : X86::OR64ri32; break; 2155 case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break; 2156 } 2157 break; 2158 } 2159 2160 // Emit the smaller op and the shift. 2161 SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT); 2162 SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst); 2163 return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0), 2164 getI8Imm(ShlVal)); 2165 } 2166 case X86ISD::UMUL: { 2167 SDValue N0 = Node->getOperand(0); 2168 SDValue N1 = Node->getOperand(1); 2169 2170 unsigned LoReg; 2171 switch (NVT.getSimpleVT().SimpleTy) { 2172 default: llvm_unreachable("Unsupported VT!"); 2173 case MVT::i8: LoReg = X86::AL; Opc = X86::MUL8r; break; 2174 case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break; 2175 case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break; 2176 case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break; 2177 } 2178 2179 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg, 2180 N0, SDValue()).getValue(1); 2181 2182 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32); 2183 SDValue Ops[] = {N1, InFlag}; 2184 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2); 2185 2186 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); 2187 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1)); 2188 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2)); 2189 return NULL; 2190 } 2191 2192 case ISD::SMUL_LOHI: 2193 case ISD::UMUL_LOHI: { 2194 SDValue N0 = Node->getOperand(0); 2195 SDValue N1 = Node->getOperand(1); 2196 2197 bool isSigned = Opcode == ISD::SMUL_LOHI; 2198 bool hasBMI2 = Subtarget->hasBMI2(); 2199 if (!isSigned) { 2200 switch (NVT.getSimpleVT().SimpleTy) { 2201 default: llvm_unreachable("Unsupported VT!"); 2202 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break; 2203 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break; 2204 case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r; 2205 MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break; 2206 case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r; 2207 MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break; 2208 } 2209 } else { 2210 switch (NVT.getSimpleVT().SimpleTy) { 2211 default: llvm_unreachable("Unsupported VT!"); 2212 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break; 2213 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break; 2214 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break; 2215 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break; 2216 } 2217 } 2218 2219 unsigned SrcReg, LoReg, HiReg; 2220 switch (Opc) { 2221 default: llvm_unreachable("Unknown MUL opcode!"); 2222 case X86::IMUL8r: 2223 case X86::MUL8r: 2224 SrcReg = LoReg = X86::AL; HiReg = X86::AH; 2225 break; 2226 case X86::IMUL16r: 2227 case X86::MUL16r: 2228 SrcReg = LoReg = X86::AX; HiReg = X86::DX; 2229 break; 2230 case X86::IMUL32r: 2231 case X86::MUL32r: 2232 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX; 2233 break; 2234 case X86::IMUL64r: 2235 case X86::MUL64r: 2236 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX; 2237 break; 2238 case X86::MULX32rr: 2239 SrcReg = X86::EDX; LoReg = HiReg = 0; 2240 break; 2241 case X86::MULX64rr: 2242 SrcReg = X86::RDX; LoReg = HiReg = 0; 2243 break; 2244 } 2245 2246 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 2247 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 2248 // Multiply is commmutative. 2249 if (!foldedLoad) { 2250 foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 2251 if (foldedLoad) 2252 std::swap(N0, N1); 2253 } 2254 2255 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg, 2256 N0, SDValue()).getValue(1); 2257 SDValue ResHi, ResLo; 2258 2259 if (foldedLoad) { 2260 SDValue Chain; 2261 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0), 2262 InFlag }; 2263 if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) { 2264 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue); 2265 SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops, 2266 array_lengthof(Ops)); 2267 ResHi = SDValue(CNode, 0); 2268 ResLo = SDValue(CNode, 1); 2269 Chain = SDValue(CNode, 2); 2270 InFlag = SDValue(CNode, 3); 2271 } else { 2272 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue); 2273 SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops, 2274 array_lengthof(Ops)); 2275 Chain = SDValue(CNode, 0); 2276 InFlag = SDValue(CNode, 1); 2277 } 2278 2279 // Update the chain. 2280 ReplaceUses(N1.getValue(1), Chain); 2281 } else { 2282 SDValue Ops[] = { N1, InFlag }; 2283 if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) { 2284 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue); 2285 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2286 array_lengthof(Ops)); 2287 ResHi = SDValue(CNode, 0); 2288 ResLo = SDValue(CNode, 1); 2289 InFlag = SDValue(CNode, 2); 2290 } else { 2291 SDVTList VTs = CurDAG->getVTList(MVT::Glue); 2292 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2293 array_lengthof(Ops)); 2294 InFlag = SDValue(CNode, 0); 2295 } 2296 } 2297 2298 // Prevent use of AH in a REX instruction by referencing AX instead. 2299 if (HiReg == X86::AH && Subtarget->is64Bit() && 2300 !SDValue(Node, 1).use_empty()) { 2301 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2302 X86::AX, MVT::i16, InFlag); 2303 InFlag = Result.getValue(2); 2304 // Get the low part if needed. Don't use getCopyFromReg for aliasing 2305 // registers. 2306 if (!SDValue(Node, 0).use_empty()) 2307 ReplaceUses(SDValue(Node, 1), 2308 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result)); 2309 2310 // Shift AX down 8 bits. 2311 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16, 2312 Result, 2313 CurDAG->getTargetConstant(8, MVT::i8)), 0); 2314 // Then truncate it down to i8. 2315 ReplaceUses(SDValue(Node, 1), 2316 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result)); 2317 } 2318 // Copy the low half of the result, if it is needed. 2319 if (!SDValue(Node, 0).use_empty()) { 2320 if (ResLo.getNode() == 0) { 2321 assert(LoReg && "Register for low half is not defined!"); 2322 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT, 2323 InFlag); 2324 InFlag = ResLo.getValue(2); 2325 } 2326 ReplaceUses(SDValue(Node, 0), ResLo); 2327 DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n'); 2328 } 2329 // Copy the high half of the result, if it is needed. 2330 if (!SDValue(Node, 1).use_empty()) { 2331 if (ResHi.getNode() == 0) { 2332 assert(HiReg && "Register for high half is not defined!"); 2333 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT, 2334 InFlag); 2335 InFlag = ResHi.getValue(2); 2336 } 2337 ReplaceUses(SDValue(Node, 1), ResHi); 2338 DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n'); 2339 } 2340 2341 return NULL; 2342 } 2343 2344 case ISD::SDIVREM: 2345 case ISD::UDIVREM: { 2346 SDValue N0 = Node->getOperand(0); 2347 SDValue N1 = Node->getOperand(1); 2348 2349 bool isSigned = Opcode == ISD::SDIVREM; 2350 if (!isSigned) { 2351 switch (NVT.getSimpleVT().SimpleTy) { 2352 default: llvm_unreachable("Unsupported VT!"); 2353 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break; 2354 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break; 2355 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break; 2356 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break; 2357 } 2358 } else { 2359 switch (NVT.getSimpleVT().SimpleTy) { 2360 default: llvm_unreachable("Unsupported VT!"); 2361 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break; 2362 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break; 2363 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break; 2364 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break; 2365 } 2366 } 2367 2368 unsigned LoReg, HiReg, ClrReg; 2369 unsigned ClrOpcode, SExtOpcode; 2370 switch (NVT.getSimpleVT().SimpleTy) { 2371 default: llvm_unreachable("Unsupported VT!"); 2372 case MVT::i8: 2373 LoReg = X86::AL; ClrReg = HiReg = X86::AH; 2374 ClrOpcode = 0; 2375 SExtOpcode = X86::CBW; 2376 break; 2377 case MVT::i16: 2378 LoReg = X86::AX; HiReg = X86::DX; 2379 ClrOpcode = X86::MOV16r0; ClrReg = X86::DX; 2380 SExtOpcode = X86::CWD; 2381 break; 2382 case MVT::i32: 2383 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX; 2384 ClrOpcode = X86::MOV32r0; 2385 SExtOpcode = X86::CDQ; 2386 break; 2387 case MVT::i64: 2388 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX; 2389 ClrOpcode = X86::MOV64r0; 2390 SExtOpcode = X86::CQO; 2391 break; 2392 } 2393 2394 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 2395 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 2396 bool signBitIsZero = CurDAG->SignBitIsZero(N0); 2397 2398 SDValue InFlag; 2399 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) { 2400 // Special case for div8, just use a move with zero extension to AX to 2401 // clear the upper 8 bits (AH). 2402 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain; 2403 if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) { 2404 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) }; 2405 Move = 2406 SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32, 2407 MVT::Other, Ops, 2408 array_lengthof(Ops)), 0); 2409 Chain = Move.getValue(1); 2410 ReplaceUses(N0.getValue(1), Chain); 2411 } else { 2412 Move = 2413 SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0); 2414 Chain = CurDAG->getEntryNode(); 2415 } 2416 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue()); 2417 InFlag = Chain.getValue(1); 2418 } else { 2419 InFlag = 2420 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, 2421 LoReg, N0, SDValue()).getValue(1); 2422 if (isSigned && !signBitIsZero) { 2423 // Sign extend the low part into the high part. 2424 InFlag = 2425 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0); 2426 } else { 2427 // Zero out the high part, effectively zero extending the input. 2428 SDValue ClrNode = 2429 SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0); 2430 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg, 2431 ClrNode, InFlag).getValue(1); 2432 } 2433 } 2434 2435 if (foldedLoad) { 2436 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0), 2437 InFlag }; 2438 SDNode *CNode = 2439 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops, 2440 array_lengthof(Ops)); 2441 InFlag = SDValue(CNode, 1); 2442 // Update the chain. 2443 ReplaceUses(N1.getValue(1), SDValue(CNode, 0)); 2444 } else { 2445 InFlag = 2446 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0); 2447 } 2448 2449 // Prevent use of AH in a REX instruction by referencing AX instead. 2450 // Shift it down 8 bits. 2451 if (HiReg == X86::AH && Subtarget->is64Bit() && 2452 !SDValue(Node, 1).use_empty()) { 2453 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2454 X86::AX, MVT::i16, InFlag); 2455 InFlag = Result.getValue(2); 2456 2457 // If we also need AL (the quotient), get it by extracting a subreg from 2458 // Result. The fast register allocator does not like multiple CopyFromReg 2459 // nodes using aliasing registers. 2460 if (!SDValue(Node, 0).use_empty()) 2461 ReplaceUses(SDValue(Node, 0), 2462 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result)); 2463 2464 // Shift AX right by 8 bits instead of using AH. 2465 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16, 2466 Result, 2467 CurDAG->getTargetConstant(8, MVT::i8)), 2468 0); 2469 ReplaceUses(SDValue(Node, 1), 2470 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result)); 2471 } 2472 // Copy the division (low) result, if it is needed. 2473 if (!SDValue(Node, 0).use_empty()) { 2474 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2475 LoReg, NVT, InFlag); 2476 InFlag = Result.getValue(2); 2477 ReplaceUses(SDValue(Node, 0), Result); 2478 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); 2479 } 2480 // Copy the remainder (high) result, if it is needed. 2481 if (!SDValue(Node, 1).use_empty()) { 2482 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2483 HiReg, NVT, InFlag); 2484 InFlag = Result.getValue(2); 2485 ReplaceUses(SDValue(Node, 1), Result); 2486 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); 2487 } 2488 return NULL; 2489 } 2490 2491 case X86ISD::CMP: 2492 case X86ISD::SUB: { 2493 // Sometimes a SUB is used to perform comparison. 2494 if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0)) 2495 // This node is not a CMP. 2496 break; 2497 SDValue N0 = Node->getOperand(0); 2498 SDValue N1 = Node->getOperand(1); 2499 2500 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to 2501 // use a smaller encoding. 2502 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && 2503 HasNoSignedComparisonUses(Node)) 2504 // Look past the truncate if CMP is the only use of it. 2505 N0 = N0.getOperand(0); 2506 if ((N0.getNode()->getOpcode() == ISD::AND || 2507 (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) && 2508 N0.getNode()->hasOneUse() && 2509 N0.getValueType() != MVT::i8 && 2510 X86::isZeroNode(N1)) { 2511 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1)); 2512 if (!C) break; 2513 2514 // For example, convert "testl %eax, $8" to "testb %al, $8" 2515 if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 && 2516 (!(C->getZExtValue() & 0x80) || 2517 HasNoSignedComparisonUses(Node))) { 2518 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8); 2519 SDValue Reg = N0.getNode()->getOperand(0); 2520 2521 // On x86-32, only the ABCD registers have 8-bit subregisters. 2522 if (!Subtarget->is64Bit()) { 2523 const TargetRegisterClass *TRC; 2524 switch (N0.getValueType().getSimpleVT().SimpleTy) { 2525 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break; 2526 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break; 2527 default: llvm_unreachable("Unsupported TEST operand type!"); 2528 } 2529 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32); 2530 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl, 2531 Reg.getValueType(), Reg, RC), 0); 2532 } 2533 2534 // Extract the l-register. 2535 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, 2536 MVT::i8, Reg); 2537 2538 // Emit a testb. 2539 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, 2540 Subreg, Imm); 2541 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has 2542 // one, do not call ReplaceAllUsesWith. 2543 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), 2544 SDValue(NewNode, 0)); 2545 return NULL; 2546 } 2547 2548 // For example, "testl %eax, $2048" to "testb %ah, $8". 2549 if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 && 2550 (!(C->getZExtValue() & 0x8000) || 2551 HasNoSignedComparisonUses(Node))) { 2552 // Shift the immediate right by 8 bits. 2553 SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8, 2554 MVT::i8); 2555 SDValue Reg = N0.getNode()->getOperand(0); 2556 2557 // Put the value in an ABCD register. 2558 const TargetRegisterClass *TRC; 2559 switch (N0.getValueType().getSimpleVT().SimpleTy) { 2560 case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break; 2561 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break; 2562 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break; 2563 default: llvm_unreachable("Unsupported TEST operand type!"); 2564 } 2565 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32); 2566 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl, 2567 Reg.getValueType(), Reg, RC), 0); 2568 2569 // Extract the h-register. 2570 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl, 2571 MVT::i8, Reg); 2572 2573 // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only 2574 // target GR8_NOREX registers, so make sure the register class is 2575 // forced. 2576 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, 2577 MVT::i32, Subreg, ShiftedImm); 2578 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has 2579 // one, do not call ReplaceAllUsesWith. 2580 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), 2581 SDValue(NewNode, 0)); 2582 return NULL; 2583 } 2584 2585 // For example, "testl %eax, $32776" to "testw %ax, $32776". 2586 if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 && 2587 N0.getValueType() != MVT::i16 && 2588 (!(C->getZExtValue() & 0x8000) || 2589 HasNoSignedComparisonUses(Node))) { 2590 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16); 2591 SDValue Reg = N0.getNode()->getOperand(0); 2592 2593 // Extract the 16-bit subregister. 2594 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, 2595 MVT::i16, Reg); 2596 2597 // Emit a testw. 2598 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, 2599 Subreg, Imm); 2600 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has 2601 // one, do not call ReplaceAllUsesWith. 2602 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), 2603 SDValue(NewNode, 0)); 2604 return NULL; 2605 } 2606 2607 // For example, "testq %rax, $268468232" to "testl %eax, $268468232". 2608 if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 && 2609 N0.getValueType() == MVT::i64 && 2610 (!(C->getZExtValue() & 0x80000000) || 2611 HasNoSignedComparisonUses(Node))) { 2612 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32); 2613 SDValue Reg = N0.getNode()->getOperand(0); 2614 2615 // Extract the 32-bit subregister. 2616 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl, 2617 MVT::i32, Reg); 2618 2619 // Emit a testl. 2620 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, 2621 Subreg, Imm); 2622 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has 2623 // one, do not call ReplaceAllUsesWith. 2624 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), 2625 SDValue(NewNode, 0)); 2626 return NULL; 2627 } 2628 } 2629 break; 2630 } 2631 case ISD::STORE: { 2632 // Change a chain of {load; incr or dec; store} of the same value into 2633 // a simple increment or decrement through memory of that value, if the 2634 // uses of the modified value and its address are suitable. 2635 // The DEC64m tablegen pattern is currently not able to match the case where 2636 // the EFLAGS on the original DEC are used. (This also applies to 2637 // {INC,DEC}X{64,32,16,8}.) 2638 // We'll need to improve tablegen to allow flags to be transferred from a 2639 // node in the pattern to the result node. probably with a new keyword 2640 // for example, we have this 2641 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst", 2642 // [(store (add (loadi64 addr:$dst), -1), addr:$dst), 2643 // (implicit EFLAGS)]>; 2644 // but maybe need something like this 2645 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst", 2646 // [(store (add (loadi64 addr:$dst), -1), addr:$dst), 2647 // (transferrable EFLAGS)]>; 2648 2649 StoreSDNode *StoreNode = cast<StoreSDNode>(Node); 2650 SDValue StoredVal = StoreNode->getOperand(1); 2651 unsigned Opc = StoredVal->getOpcode(); 2652 2653 LoadSDNode *LoadNode = 0; 2654 SDValue InputChain; 2655 if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG, 2656 LoadNode, InputChain)) 2657 break; 2658 2659 SDValue Base, Scale, Index, Disp, Segment; 2660 if (!SelectAddr(LoadNode, LoadNode->getBasePtr(), 2661 Base, Scale, Index, Disp, Segment)) 2662 break; 2663 2664 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2); 2665 MemOp[0] = StoreNode->getMemOperand(); 2666 MemOp[1] = LoadNode->getMemOperand(); 2667 const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain }; 2668 EVT LdVT = LoadNode->getMemoryVT(); 2669 unsigned newOpc = getFusedLdStOpcode(LdVT, Opc); 2670 MachineSDNode *Result = CurDAG->getMachineNode(newOpc, 2671 Node->getDebugLoc(), 2672 MVT::i32, MVT::Other, Ops, 2673 array_lengthof(Ops)); 2674 Result->setMemRefs(MemOp, MemOp + 2); 2675 2676 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1)); 2677 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0)); 2678 2679 return Result; 2680 } 2681 2682 // FIXME: Custom handling because TableGen doesn't support multiple implicit 2683 // defs in an instruction pattern 2684 case X86ISD::PCMPESTRI: { 2685 SDValue N0 = Node->getOperand(0); 2686 SDValue N1 = Node->getOperand(1); 2687 SDValue N2 = Node->getOperand(2); 2688 SDValue N3 = Node->getOperand(3); 2689 SDValue N4 = Node->getOperand(4); 2690 2691 // Make sure last argument is a constant 2692 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N4); 2693 if (!Cst) 2694 break; 2695 2696 uint64_t Imm = Cst->getZExtValue(); 2697 2698 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, 2699 X86::EAX, N1, SDValue()).getValue(1); 2700 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX, 2701 N3, InFlag).getValue(1); 2702 2703 SDValue Ops[] = { N0, N2, getI8Imm(Imm), InFlag }; 2704 unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : 2705 X86::PCMPESTRIrr; 2706 InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops, 2707 array_lengthof(Ops)), 0); 2708 2709 if (!SDValue(Node, 0).use_empty()) { 2710 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2711 X86::ECX, NVT, InFlag); 2712 InFlag = Result.getValue(2); 2713 ReplaceUses(SDValue(Node, 0), Result); 2714 } 2715 if (!SDValue(Node, 1).use_empty()) { 2716 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2717 X86::EFLAGS, NVT, InFlag); 2718 InFlag = Result.getValue(2); 2719 ReplaceUses(SDValue(Node, 1), Result); 2720 } 2721 2722 return NULL; 2723 } 2724 2725 // FIXME: Custom handling because TableGen doesn't support multiple implicit 2726 // defs in an instruction pattern 2727 case X86ISD::PCMPISTRI: { 2728 SDValue N0 = Node->getOperand(0); 2729 SDValue N1 = Node->getOperand(1); 2730 SDValue N2 = Node->getOperand(2); 2731 2732 // Make sure last argument is a constant 2733 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N2); 2734 if (!Cst) 2735 break; 2736 2737 uint64_t Imm = Cst->getZExtValue(); 2738 2739 SDValue Ops[] = { N0, N1, getI8Imm(Imm) }; 2740 unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : 2741 X86::PCMPISTRIrr; 2742 SDValue InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops, 2743 array_lengthof(Ops)), 0); 2744 2745 if (!SDValue(Node, 0).use_empty()) { 2746 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2747 X86::ECX, NVT, InFlag); 2748 InFlag = Result.getValue(2); 2749 ReplaceUses(SDValue(Node, 0), Result); 2750 } 2751 if (!SDValue(Node, 1).use_empty()) { 2752 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2753 X86::EFLAGS, NVT, InFlag); 2754 InFlag = Result.getValue(2); 2755 ReplaceUses(SDValue(Node, 1), Result); 2756 } 2757 2758 return NULL; 2759 } 2760 } 2761 2762 SDNode *ResNode = SelectCode(Node); 2763 2764 DEBUG(dbgs() << "=> "; 2765 if (ResNode == NULL || ResNode == Node) 2766 Node->dump(CurDAG); 2767 else 2768 ResNode->dump(CurDAG); 2769 dbgs() << '\n'); 2770 2771 return ResNode; 2772} 2773 2774bool X86DAGToDAGISel:: 2775SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, 2776 std::vector<SDValue> &OutOps) { 2777 SDValue Op0, Op1, Op2, Op3, Op4; 2778 switch (ConstraintCode) { 2779 case 'o': // offsetable ?? 2780 case 'v': // not offsetable ?? 2781 default: return true; 2782 case 'm': // memory 2783 if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4)) 2784 return true; 2785 break; 2786 } 2787 2788 OutOps.push_back(Op0); 2789 OutOps.push_back(Op1); 2790 OutOps.push_back(Op2); 2791 OutOps.push_back(Op3); 2792 OutOps.push_back(Op4); 2793 return false; 2794} 2795 2796/// createX86ISelDag - This pass converts a legalized DAG into a 2797/// X86-specific DAG, ready for instruction scheduling. 2798/// 2799FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, 2800 CodeGenOpt::Level OptLevel) { 2801 return new X86DAGToDAGISel(TM, OptLevel); 2802} 2803